ngram
listlengths
0
67.8k
[ "#<- end def begin_import(): settings = { 'import_bmesh': btn_import_bmesh.val, 'remove_doubles': btn_remove_doubles.val, 'all_bones': btn_all_bones.val,", "group.flags) mesh_objects.append(obj) # save reference to current object log( '--Rigging:', data_group.bones and 'yes'", "in bone_set) else node.abs_transform.loc # the bone's length must not be 0, otherwise", "Draw.Exit() elif evt == 1: begin_import() elif evt == 0x11: Blender.Window.FileSelector(set_gmdc_filename, 'Select') elif", "t[2]: I[i] = (t[2], t[0], t[1]) log( '--Triangle # %i reordered:' % i,", "armature.envelopes = False armature.vertexGroups = True armature.drawType = Blender.Armature.STICK arm_obj = scene.objects.new(armature) #", "not gmdc_filename: display_menu('Error!', ['Select GMDC file.']) return # create log file (if needed)", "['Could not load geometry file. See log for details.']) return geometry = res.nodes[0].geometry", "20, pos_y, 300, 20, str_cres_filename.val, MAX_PATH, \"Path to resource node file (CRES; optional,", "= 0 def set_gmdc_filename(filename): global gmdc_filename str_gmdc_filename.val = filename def set_cres_filename(filename): global cres_filename", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR", "'UVMap' return mesh def add_bones_to_armature(transform_nodes, parent_bone=None): for node in transform_nodes: if id(node) in", "= Draw.Create(\"\") btn_import_bmesh = Draw.Create(0) btn_remove_doubles = Draw.Create(1) btn_all_bones = Draw.Create(0) btn_save_log =", "+ s i+= 1 return s #--------------------------------------- # get active scene scene =", "bounding geometry # if settings['import_bmesh']: if geometry.static_bmesh: log( 'Creating static bounding mesh...' 
)", "# log( '==Geometry Data Container Importer======' ) log( 'GMDC file:', gmdc_filename ) log(", "bottom def make_unique_bone_name(name, idx, collection): idx = '#%i'%idx if idx!=None else '' s", "for i, s in enumerate(items)), 0x100) b = choice_required and choice < 0", "reversed(w): del I[i] if T1: del T1[i] if T2: del T2[i] w =", "assert type(group.name) == str obj.addProperty('name', group.name) # Blender does not like Unicode here", "# not envelopes modifier[Blender.Modifier.Settings.OBJECT ] = arm_obj scene.update() #<- end def begin_import(): settings", "not transform_tree: close_log_file() display_menu('Error!', ['Could not load resource node file. See log for", "Tooltip: 'Import TS2 GMDC file' \"\"\" #------------------------------------------------------------------------------- # Copyright (C) 2016 DjAlex88 (https://github.com/djalex88/)", "Vector as BlenderVector ######################################## ## Importer ######################################## def create_objects(geometry, transform_tree, settings): #--------------------------------------- #", "to permit persons to whom the Software is # furnished to do so,", "pos as arithmetic mean v = [_n.abs_transform.loc for _n in node.child_nodes if (id(_n)", "mesh def add_bones_to_armature(transform_nodes, parent_bone=None): for node in transform_nodes: if id(node) in node_ids: _bone", "transforms in scene.properties[\"gmdc_inverse_transforms\"]' ) scene.properties['gmdc_inverse_transforms'] = v # # add armature (if any)", "t in zip(mesh.faces, T1): f.uv = tuple(BlenderVector(u, 1-v) for u, v in t)", "# # add mesh objects (main geometry) # mesh_objects = [] for group", "transforms.', 'No, keep previously loaded inverse transforms.'], choice_required=True) == 0: raise Exception() except:", "W)): for wi, j in enumerate(b): if wi == 3: f = 1.0", "T2): # create mesh # mesh = Blender.Mesh.New(name) mesh.verts.extend(V) mesh.faces.extend(I, ignoreDups=True, smooth=True) #", "geometry file. 
See log for details.']) return geometry = res.nodes[0].geometry log() transform_tree =", "= str_cres_filename.val.strip() if not gmdc_filename: display_menu('Error!', ['Select GMDC file.']) return # create log", "and this permission notice shall be included in # all copies or substantial", "Draw.PushButton(\"Select file\", 0x21, 320, pos_y, 100, 20, \"Open file browser\") Draw.EndAlign() pos_y-= 35", "_bone.tail = BlenderVector(v.to_tuple()) if parent_bone: _bone.parent = parent_bone name = make_unique_bone_name(node.name, node.bone_index, armature.bones.keys())", "(if any) # if transform_tree: bone_set = set(chain(*(group.bones or [] for group in", "f = 1.0 - sum(w) else: f = w[wi] mesh.assignVertsToGroup(v_group_names[j], [i], f, 1)", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "import Vector as BlenderVector ######################################## ## Importer ######################################## def create_objects(geometry, transform_tree, settings): #---------------------------------------", ") data_group = geometry.data_groups[group.data_group_index] # define index mapping S = {} # {", "= res.nodes[0].geometry log() transform_tree = None if cres_filename: # load skeleton log( 'Opening", "mesh.addUVLayer('UVMap2') mesh.activeUVLayer = 'UVMap2' for f, t in zip(mesh.faces, T2): f.uv = tuple(BlenderVector(u,", "pos_y-= 30 # plugin's header s = \"GMDC Importer (TS2)\" Blender.BGL.glColor3f(0.8, 0.8, 0.8)", "vertex on 3rd position # as well as degenerate triangles (i.e., less than", "v in t) # Direct3D -> OpenGL if T2: mesh.addUVLayer('UVMap2') mesh.activeUVLayer = 'UVMap2'", "modifier(s)...' 
) # assign armature modifier # for obj in mesh_objects: modifier =", "j in enumerate(b): if wi == 3: f = 1.0 - sum(w) else:", "enumerate(geometry.dynamic_bmesh): if part: V, I = part S = {} # { old_index", "= [(T2[i], T2[j], T2[k]) for i, j, k in I] else: T2 =", "import bpy, Blender from Blender import Draw from Blender.Mathutils import Vector as BlenderVector", "transforms.'], choice_required=True) == 0: raise Exception() except: log( 'Saving inverse transforms in scene.properties[\"gmdc_inverse_transforms\"]'", "0x31, 20, pos_y, 100, 20, btn_import_bmesh.val, \"Import bounding geometry\") btn_remove_doubles = Draw.Toggle(\"Rm. doubles\",", "mesh = Blender.Mesh.New('b_mesh') obj = scene.objects.new(mesh) obj.name = 'b_mesh' v_group_names = set() for", "# add bounding geometry # if settings['import_bmesh']: if geometry.static_bmesh: log( 'Creating static bounding", "else: T2 = None else: T1 = group.tex_coords and group.tex_coords[:] # copy or", "str_cres_filename, btn_import_bmesh, btn_all_bones, btn_remove_doubles, btn_save_log pos_y = 230 ; MAX_PATH = 200 #", "= [tuple(group.bones[j] for j in b) for b in B] dd = dict()", "transform_tree and transform_tree.get_node(idx).name or 'bone' name = make_unique_bone_name(name, idx, v_group_names) v_group_names.add(name) mesh.addVertGroup(name) mesh.assignVertsToGroup(name,", "settings['remove_doubles'] ) log( '--Import all bones: ', settings['all_bones'] ) log() # load geometry", "= v # # add armature (if any) # if transform_tree: bone_set =", "get active scene scene = bpy.data.scenes.active # # add mesh objects (main geometry)", "group.tex_coords[:] # copy or None T2 = group.tex_coords2 and group.tex_coords2[:] # also, Blender", "triangles: %i)...' 
% (len(V), len(I)) ) # create mesh and add it to", "Draw.BeginAlign() str_cres_filename = Draw.String(\"\", 0x20, 20, pos_y, 300, 20, str_cres_filename.val, MAX_PATH, \"Path to", "b in B] dd = dict() # { index -> unique_bone_name } for", "S[k]) for i, j, k in group.indices] # filtering function def select_data(data): return", "b) for b in B] dd = dict() # { index -> unique_bone_name", "#--------------------------------------- # event handlers l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0 def set_gmdc_filename(filename): global", "i, t, '->', I[i] ) if T1: uv1, uv2, uv3 = T1[i] T1[i]", "set_log_file(f) # # begin import # log( '==Geometry Data Container Importer======' ) log(", "the bottom def make_unique_bone_name(name, idx, collection): idx = '#%i'%idx if idx!=None else ''", "= i # map indices I = [(S[i], S[j], S[k]) for i, j,", "idx, dd.values()) # add vertex group mesh.addVertGroup(name) v_group_names = [dd.get(j) for j in", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "normals mesh.calcNormals() if T1: mesh.addUVLayer('UVMap') # assign texture coords # for f, t", "100, 30, \"Import geometry (Ctrl + Enter)\") Draw.PushButton(\"Exit\", 0, 220, pos_y, 100, 30,", "part: V, I = part S = {} # { old_index -> new_index", "w and display_menu('The file has a different set of inverse transforms. 
Replace?', ['Yes,", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "= T2[i] T2[i] = (uv3, uv1, uv2) if len(set(t)) < 3: w.append(i) log(", "V, I = part S = {} # { old_index -> new_index }", "Blender.Redraw() # exit prompt if display_menu(\"Import complete!\", ['Quit']) == 0: Draw.Exit() finally: close_log_file()", "] = True # use vertex groups modifier[Blender.Modifier.Settings.ENVELOPES] = False # not envelopes", "BlenderVector(v.to_tuple()) if parent_bone: _bone.parent = parent_bone name = make_unique_bone_name(node.name, node.bone_index, armature.bones.keys()) # add", "if settings['import_bmesh']: if geometry.static_bmesh: log( 'Creating static bounding mesh...' ) V, I =", "for i, j, k in I] mesh.verts.extend(V) mesh.faces.extend(I) name = transform_tree and transform_tree.get_node(idx).name", "log file \"%s\" for writing... ' % s ) try: f = open(s,", "try: f = open(s, 'w') except IOError as e: error(e) display_menu('Error!', ['Could not", ") try: res = load_resource(gmdc_filename, _save_log and 2 or 1) except: print_last_exception() res", "_save_log and 2 or 1) except: print_last_exception() res = False if not res", "i+j rot, loc = geometry.inverse_transforms[idx] t = Transform(loc, rot).get_inverse() V = [t.transformPoint(Vector(*x)).to_tuple() for", "elif evt == 1: begin_import() elif evt == 0x11: Blender.Window.FileSelector(set_gmdc_filename, 'Select') elif evt", "for i, x in enumerate(data) if i in S] V = select_data(data_group.vertices) #", "geometry = res.nodes[0].geometry log() transform_tree = None if cres_filename: # load skeleton log(", "script (Esc)\") Draw.EndAlign() #--------------------------------------- # event handlers l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0", "T1, T2) obj = scene.objects.new(mesh) obj.name = group.name # max - 21 characters", "# if data_group.keys: log( '--Adding shape keys...' 
) keys = select_data(data_group.keys) dV =", "IOError as e: error(e) display_menu('Error!', ['Could not open log file for writing.']) return", "Draw from Blender.Mathutils import Vector as BlenderVector ######################################## ## Importer ######################################## def create_objects(geometry,", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "1-v) for u, v in t) mesh.activeUVLayer = 'UVMap' return mesh def add_bones_to_armature(transform_nodes,", "mesh.verts.extend(V) mesh.faces.extend(I) name = transform_tree and transform_tree.get_node(idx).name or 'bone' name = make_unique_bone_name(name, idx,", "for idx, part in enumerate(geometry.dynamic_bmesh): if part: V, I = part S =", "Direct3D -> OpenGL if T2: mesh.addUVLayer('UVMap2') mesh.activeUVLayer = 'UVMap2' for f, t in", "== 1: begin_import() elif evt == 0x11: Blender.Window.FileSelector(set_gmdc_filename, 'Select') elif evt == 0x21:", "id(node) not in node_ids: node_ids.add(id(node)) node = node.parent if node_ids: log( 'Creating armature...'", "for details.']) return log() if _save_log: log( '==SKELETON==============================' ) log( transform_tree ) log()", "0 def set_gmdc_filename(filename): global gmdc_filename str_gmdc_filename.val = filename def set_cres_filename(filename): global cres_filename str_cres_filename.val", "but recommended)\") Draw.PushButton(\"Select file\", 0x21, 320, pos_y, 100, 20, \"Open file browser\") Draw.EndAlign()", "log data into file *.import_log.txt\") Draw.EndAlign() pos_y-= 45 # buttons Draw.BeginAlign() Draw.PushButton(\"Import\", 1,", "must not be 0, otherwise Blender ignores it if (node.abs_transform.loc-v).len() < 0.025: v", "for i, x in enumerate(sorted(set(chain(*group.indices)))): S[x] = i # map indices I =", "'removed' ) for i in reversed(w): del I[i] if T1: del T1[i] if", "20, pos_y, 400, 30) pos_y-= 30 # GMDC file selector Draw.Label(\"GMDC file\", 20,", "v # # add armature (if any) # if transform_tree: 
bone_set = set(chain(*(group.bones", "NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "software and associated documentation files (the \"Software\"), to deal # in the Software", "{ old_index -> new_index } for i, x in enumerate(sorted(set(chain(*group.indices)))): S[x] = i", "See log for details.']) else: # Ok log( 'Finished!' ) Blender.Redraw() # exit", "parent_bone name = make_unique_bone_name(node.name, node.bone_index, armature.bones.keys()) # add bone and its children armature.bones[name]", "= None # shape keys # if data_group.keys: log( '--Adding shape keys...' )", "in zip(mesh.faces, T1): f.uv = tuple(BlenderVector(u, 1-v) for u, v in t) #", "# since Blender recalculates normals, setting original normals is useless # instead, calculate", "and to permit persons to whom the Software is # furnished to do", "group.tex_coords2 and group.tex_coords2[:] # also, Blender does not like triangles with zero-index vertex", "browser\") Draw.EndAlign() pos_y-= 35 # options Draw.BeginAlign() btn_import_bmesh = Draw.Toggle(\"Bound. mesh\", 0x31, 20,", "Blender: 249 Group: 'Import' Tooltip: 'Import TS2 GMDC file' \"\"\" #------------------------------------------------------------------------------- # Copyright", "# furnished to do so, subject to the following conditions: # # The", "the Software, and to permit persons to whom the Software is # furnished", "in enumerate(zip(B, W)): for wi, j in enumerate(b): if wi == 3: f", "transform_nodes: if id(node) in node_ids: _bone = Blender.Armature.Editbone() _bone.head = BlenderVector(node.abs_transform.loc.to_tuple()) # compute", "T2 = None else: T1 = group.tex_coords and group.tex_coords[:] # copy or None", "% (len(w)/7) ) if v != w and display_menu('The file has a different", "# 1 - Blender.Mesh.AssignModes.REPLACE v_group_names = dd = None # shape keys #", "bpy.data.scenes.active # # add mesh objects (main geometry) # mesh_objects = [] for", "log( 'Creating static bounding mesh...' 
) V, I = geometry.static_bmesh mesh = Blender.Mesh.New('b_mesh')", "obj.name = 'b_mesh' if geometry.dynamic_bmesh: log( 'Creating dynamic bounding mesh...' ) mesh =", "0x11, 320, pos_y, 100, 20, \"Open file browser\") Draw.EndAlign() pos_y-= 30 # resource", "all bones: ', settings['all_bones'] ) log() # load geometry log( 'Opening GMDC file", "# Direct3D -> OpenGL if T2: mesh.addUVLayer('UVMap2') mesh.activeUVLayer = 'UVMap2' for f, t", "ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "val elif evt == Draw.RETKEY and val and (l_ctrl_key_pressed or r_ctrl_key_pressed): begin_import() l_ctrl_key_pressed", "normals, setting original normals is useless # instead, calculate normals mesh.calcNormals() if T1:", "items, choice_required=False): b = True while b: choice = Draw.PupMenu('%s%%t|'%caption + \"|\".join('%s%%x%i'%(s, i)", "replace inverse transforms.', 'No, keep previously loaded inverse transforms.'], choice_required=True) == 0: raise", "T2 = group.tex_coords2 and group.tex_coords2[:] # also, Blender does not like triangles with", "log( '\\x20\\x20--Key \"%s\"' % s ) obj.insertShapeKey() mesh.key.blocks[-1].name = s # set name", "log( 'Opening GMDC file \"%s\"...' % gmdc_filename ) try: res = load_resource(gmdc_filename, _save_log", "v = [_n.abs_transform.loc for _n in node.child_nodes if (id(_n) in node_ids and _n.bone_index", "mesh.verts.extend(V) mesh.faces.extend(I, ignoreDups=True, smooth=True) # since Blender recalculates normals, setting original normals is", "res.nodes[0].type != 'cGeometryDataContainer': res and error( 'Not a GMDC file!' 
) close_log_file() display_menu('Error!',", "See log for details.']) return geometry = res.nodes[0].geometry log() transform_tree = None if", "in node_ids: _bone = Blender.Armature.Editbone() _bone.head = BlenderVector(node.abs_transform.loc.to_tuple()) # compute tail pos as", "doubles: ', settings['remove_doubles'] ) log( '--Import all bones: ', settings['all_bones'] ) log() #", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH", "group.bones: name = transform_tree and transform_tree.get_node(idx).name or 'bone' dd[idx] = name = make_unique_bone_name(name,", "######################################## ## GUI ######################################## def display_menu(caption, items, choice_required=False): b = True while b:", "if data_group.keys: log( '--Adding shape keys...' ) keys = select_data(data_group.keys) dV = map(select_data,", "THE USE OR OTHER DEALINGS IN # THE SOFTWARE. #------------------------------------------------------------------------------- from gmdc_tools import", "load_resource(gmdc_filename, _save_log and 2 or 1) except: print_last_exception() res = False if not", "_bone add_bones_to_armature(node.child_nodes, _bone) ## ## armature, node_ids and bone_set are defined at the", "group.name) # Blender does not like Unicode here obj.addProperty('flags', '%08X' % group.flags) mesh_objects.append(obj)", "'Not a GMDC file!' ) close_log_file() display_menu('Error!', ['Could not load geometry file. See", "= name = make_unique_bone_name(name, idx, dd.values()) # add vertex group mesh.addVertGroup(name) v_group_names =", "gmdc_filename: display_menu('Error!', ['Select GMDC file.']) return # create log file (if needed) if", "PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "file\", 0x21, 320, pos_y, 100, 20, \"Open file browser\") Draw.EndAlign() pos_y-= 35 #", "v_group_names = [dd.get(j) for j in xrange(max(dd)+1)] # assign vertices for i, (b,", "r_ctrl_key_pressed if evt == Draw.ESCKEY and val: Draw.Exit() elif evt == Draw. LEFTCTRLKEY:", "T1[j], T1[k]) for i, j, k in I] if data_group.tex_coords2: T2 = select_data(data_group.tex_coords2)", "300, 20, str_gmdc_filename.val, MAX_PATH, \"Path to GMDC file\") Draw.PushButton(\"Select file\", 0x11, 320, pos_y,", "idx, v_group_names) v_group_names.add(name) mesh.addVertGroup(name) mesh.assignVertsToGroup(name, S.values(), 1.0, 1) mesh.calcNormals() v_group_names = None mesh_objects.append(obj)", "'CRES file:', cres_filename ) log( 'Settings:' ) log( '--Import bounding geometry:', settings['import_bmesh'] )", "OpenGL if T2: mesh.addUVLayer('UVMap2') mesh.activeUVLayer = 'UVMap2' for f, t in zip(mesh.faces, T2):", "T1[i] = (uv3, uv1, uv2) if T2: uv1, uv2, uv3 = T2[i] T2[i]", "btn_save_log pos_y = 230 ; MAX_PATH = 200 # frame Blender.BGL.glColor3f(0.75, 0.75, 0.75)", "k in I] else: T2 = None else: T1 = group.tex_coords and group.tex_coords[:]", "'==Geometry Data Container Importer======' ) log( 'GMDC file:', gmdc_filename ) log( 'CRES file:',", "data_group.dVerts) log( '\\x20\\x20--Length of dV: (%i, %i, %i, %i)' % tuple(map(len, dV)) )", "setting original normals is useless # instead, calculate normals mesh.calcNormals() if T1: mesh.addUVLayer('UVMap')", "Ok log( 'Finished!' ) Blender.Redraw() # exit prompt if display_menu(\"Import complete!\", ['Quit']) ==", "Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "for i, j, k in I] else: T2 = None else: T1 =", "index -> unique_bone_name } for idx in group.bones: name = transform_tree and transform_tree.get_node(idx).name", "pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign() str_gmdc_filename = Draw.String(\"\", 0x10, 20, pos_y, 300,", "'--Triangle # %i' % i, t, 'removed' ) for i in reversed(w): del", "j = key.index(idx) v = dV[j] if v: block_verts[i]+= BlenderVector(*v[i]) obj.activeShape = 1", "pos_y-= 30 # resource node file selector Draw.Label(\"Resource node file (optional)\", 20, pos_y,", "to deal # in the Software without restriction, including without limitation the rights", "old_index -> new_index } j = len(mesh.verts) for i, x in enumerate(sorted(set(chain(*I)))): S[x]", "to any person obtaining a copy # of this software and associated documentation", "add_bones_to_armature(transform_nodes, parent_bone=None): for node in transform_nodes: if id(node) in node_ids: _bone = Blender.Armature.Editbone()", "1) # 1 - Blender.Mesh.AssignModes.REPLACE v_group_names = dd = None # shape keys", "or r_ctrl_key_pressed): begin_import() l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0 def button_events(evt): if evt", "# the bone's length must not be 0, otherwise Blender ignores it if", "######################################## def create_objects(geometry, transform_tree, settings): #--------------------------------------- # subroutines def create_mesh(name, V, I, T1,", "'import_bmesh': btn_import_bmesh.val, 'remove_doubles': btn_remove_doubles.val, 'all_bones': btn_all_bones.val, } _save_log = bool(btn_save_log.val) gmdc_filename = str_gmdc_filename.val.strip()", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "w[wi] mesh.assignVertsToGroup(v_group_names[j], [i], f, 1) # 1 - Blender.Mesh.AssignModes.REPLACE v_group_names = dd =", "= val elif evt == Draw.RIGHTCTRLKEY: r_ctrl_key_pressed = val elif evt == Draw.RETKEY", "are defined at the 
bottom def make_unique_bone_name(name, idx, collection): idx = '#%i'%idx if", "the bone's length must not be 0, otherwise Blender ignores it if (node.abs_transform.loc-v).len()", "'bone' name = make_unique_bone_name(name, idx, v_group_names) v_group_names.add(name) mesh.addVertGroup(name) mesh.assignVertsToGroup(name, S.values(), 1.0, 1) mesh.calcNormals()", "= geometry.static_bmesh mesh = Blender.Mesh.New('b_mesh') mesh.verts.extend(V) mesh.faces.extend(I) obj = scene.objects.new(mesh) obj.name = 'b_mesh'", "# %i reordered:' % i, t, '->', I[i] ) if T1: uv1, uv2,", "f = w[wi] mesh.assignVertsToGroup(v_group_names[j], [i], f, 1) # 1 - Blender.Mesh.AssignModes.REPLACE v_group_names =", "less than 3 different indices): # https://www.blender.org/api/249PythonDoc/Mesh.MFaceSeq-class.html#extend # w = [] for i,", "pos_y, 100, 30, \"Import geometry (Ctrl + Enter)\") Draw.PushButton(\"Exit\", 0, 220, pos_y, 100,", "the script (Esc)\") Draw.EndAlign() #--------------------------------------- # event handlers l_ctrl_key_pressed = 0 r_ctrl_key_pressed =", "only in texture coordinates, then they are merged together (removes seams)\") btn_all_bones =", "Blender from Blender import Draw from Blender.Mathutils import Vector as BlenderVector ######################################## ##", "node = node.parent if node_ids: log( 'Creating armature...' ) log( '--Number of transform", "f, 1) # 1 - Blender.Mesh.AssignModes.REPLACE v_group_names = dd = None # shape", "for u, v in t) # Direct3D -> OpenGL if T2: mesh.addUVLayer('UVMap2') mesh.activeUVLayer", "mesh with dV # for i, key in _keys_f: j = key.index(idx) v", "active scene scene = bpy.data.scenes.active # # add mesh objects (main geometry) #", "['An error has occured. See log for details.']) else: # Ok log( 'Finished!'", "20, pos_y, 300, 20, str_gmdc_filename.val, MAX_PATH, \"Path to GMDC file\") Draw.PushButton(\"Select file\", 0x11,", "load resource node file. 
See log for details.']) return log() if _save_log: log(", "Draw.Toggle(\"Save log\", 0x34, 320, pos_y, 100, 20, btn_save_log.val, \"Write script's log data into", "selector Draw.Label(\"GMDC file\", 20, pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign() str_gmdc_filename = Draw.String(\"\",", "this software and associated documentation files (the \"Software\"), to deal # in the", "file selector Draw.Label(\"GMDC file\", 20, pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign() str_gmdc_filename =", "## GUI ######################################## def display_menu(caption, items, choice_required=False): b = True while b: choice", "- Blender.Mesh.AssignModes.REPLACE v_group_names = dd = None # shape keys # if data_group.keys:", "file\") Draw.PushButton(\"Select file\", 0x11, 320, pos_y, 100, 20, \"Open file browser\") Draw.EndAlign() pos_y-=", "vertex groups...' ) # map bones B = [tuple(group.bones[j] for j in b)", "= [_n.abs_transform.loc for _n in node.child_nodes if (id(_n) in node_ids and _n.bone_index in", "= \"GMDC Importer (TS2)\" Blender.BGL.glColor3f(0.8, 0.8, 0.8) Blender.BGL.glRecti(10, pos_y, 430, pos_y+30) Draw.Label(s, 20,", "1) if res and res.nodes[0].type == 'cResourceNode': transform_tree = build_transform_tree(res.nodes) else: res and", "% s ) obj.insertShapeKey() mesh.key.blocks[-1].name = s # set name block_verts = mesh.key.blocks[-1].data", "pos_y, 100, 20, btn_save_log.val, \"Write script's log data into file *.import_log.txt\") Draw.EndAlign() pos_y-=", "return log() if _save_log: log( '==SKELETON==============================' ) log( transform_tree ) log() try: if", "granted, free of charge, to any person obtaining a copy # of this", "log( '--Import bounding geometry:', settings['import_bmesh'] ) log( '--Remove doubles: ', settings['remove_doubles'] ) log(", "'Creating dynamic bounding mesh...' 
) mesh = Blender.Mesh.New('b_mesh') obj = scene.objects.new(mesh) obj.name =", "v_group_names) v_group_names.add(name) mesh.addVertGroup(name) mesh.assignVertsToGroup(name, S.values(), 1.0, 1) mesh.calcNormals() v_group_names = None mesh_objects.append(obj) #", "tuple(chain(*chain(*geometry.inverse_transforms))) try: w = tuple(scene.properties['gmdc_inverse_transforms']) log( 'Scene already has inverse transforms (%i) stored", "< 0.025: v = node.abs_transform.loc + node.abs_transform.rot.get_matrix().col(2)*0.05 _bone.tail = BlenderVector(v.to_tuple()) if parent_bone: _bone.parent", "geometry:', settings['import_bmesh'] ) log( '--Remove doubles: ', settings['remove_doubles'] ) log( '--Import all bones:", "(CRES; optional, but recommended)\") Draw.PushButton(\"Select file\", 0x21, 320, pos_y, 100, 20, \"Open file", "20, btn_save_log.val, \"Write script's log data into file *.import_log.txt\") Draw.EndAlign() pos_y-= 45 #", "# { index -> unique_bone_name } for idx in group.bones: name = transform_tree", "# Permission is hereby granted, free of charge, to any person obtaining a", "and group.tex_coords[:] # copy or None T2 = group.tex_coords2 and group.tex_coords2[:] # also,", "armature, node_ids and bone_set are defined at the bottom def make_unique_bone_name(name, idx, collection):", "_n in node.child_nodes if (id(_n) in node_ids and _n.bone_index in bone_set)] v =", "Draw.Toggle(\"All bones\", 0x33, 220, pos_y, 100, 20, btn_all_bones.val, \"Import all bones/transforms; otherwise, used", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF", "Blender.Mesh.New('b_mesh') obj = scene.objects.new(mesh) obj.name = 'b_mesh' v_group_names = set() for idx, part", "- 21 characters # save original name and flags assert type(group.name) == str", "error( 'Not a CRES file!' 
) except: print_last_exception() if not transform_tree: close_log_file() display_menu('Error!',", "30 # GMDC file selector Draw.Label(\"GMDC file\", 20, pos_y, 200, 20) pos_y-= 20", "'Import' Tooltip: 'Import TS2 GMDC file' \"\"\" #------------------------------------------------------------------------------- # Copyright (C) 2016 DjAlex88", "in I] if data_group.tex_coords2: T2 = select_data(data_group.tex_coords2) T2 = [(T2[i], T2[j], T2[k]) for", "uv1, uv2, uv3 = T2[i] T2[i] = (uv3, uv1, uv2) if len(set(t)) <", "#--------------------------------------- # get active scene scene = bpy.data.scenes.active # # add mesh objects", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION", "without restriction, including without limitation the rights # to use, copy, modify, merge,", "object log( '--Rigging:', data_group.bones and 'yes' or 'no' ) # rigging # if", "name = transform_tree and transform_tree.get_node(idx).name or 'bone' dd[idx] = name = make_unique_bone_name(name, idx,", "GMDC file selector Draw.Label(\"GMDC file\", 20, pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign() str_gmdc_filename", "T2 = [(T2[i], T2[j], T2[k]) for i, j, k in I] else: T2", "= Transform(loc, rot).get_inverse() V = [t.transformPoint(Vector(*x)).to_tuple() for i, x in enumerate(V) if i", "add_bones_to_armature(node.child_nodes, _bone) ## ## armature, node_ids and bone_set are defined at the bottom", "% (len(V), len(I)) ) # create mesh and add it to the scene", "# map bones B = [tuple(group.bones[j] for j in b) for b in", "group.tex_coords and group.tex_coords[:] # copy or None T2 = group.tex_coords2 and group.tex_coords2[:] #", "include all nodes down to root while node and id(node) not in node_ids:", "t[0], t[1]) log( '--Triangle # %i reordered:' % i, t, '->', I[i] )", "0: Draw.Exit() finally: close_log_file() ######################################## ## GUI ######################################## def display_menu(caption, items, choice_required=False): b", "filter(lambda t: 
idx in t[1], enumerate(keys)) if _keys_f: s = '::'.join(s) log( '\\x20\\x20--Key", "file. See log for details.']) return log() if _save_log: log( '==SKELETON==============================' ) log(", "otherwise Blender ignores it if (node.abs_transform.loc-v).len() < 0.025: v = node.abs_transform.loc + node.abs_transform.rot.get_matrix().col(2)*0.05", "copies of the Software, and to permit persons to whom the Software is", "[dd.get(j) for j in xrange(max(dd)+1)] # assign vertices for i, (b, w) in", "add it to the scene mesh = create_mesh(group.name, V, I, T1, T2) obj", "in scene.properties[\"gmdc_inverse_transforms\"]' ) scene.properties['gmdc_inverse_transforms'] = v # # add armature (if any) #", "create armature object arm_obj.drawMode |= Blender.Object.DrawModes.XRAY # add bones armature.makeEditable() add_bones_to_armature(transform_tree.root_nodes) armature.update() log(", "3: w.append(i) log( '--Triangle # %i' % i, t, 'removed' ) for i", "uv2) if T2: uv1, uv2, uv3 = T2[i] T2[i] = (uv3, uv1, uv2)", "error has occured. See log for details.']) else: # Ok log( 'Finished!' )", "Importer ######################################## def create_objects(geometry, transform_tree, settings): #--------------------------------------- # subroutines def create_mesh(name, V, I,", "w = None log( '--Creating mesh object (vertices: %i, triangles: %i)...' % (len(V),", "try: if settings['remove_doubles']: log( 'Removing doubles...' ) geometry.remove_doubles() log() log( 'Creating objects...' 
)", "it to the scene mesh = create_mesh(group.name, V, I, T1, T2) obj =", "in enumerate(data) if i in S] V = select_data(data_group.vertices) # texture coords if", "file (optional)\", 20, pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign() str_cres_filename = Draw.String(\"\", 0x20,", "as degenerate triangles (i.e., less than 3 different indices): # https://www.blender.org/api/249PythonDoc/Mesh.MFaceSeq-class.html#extend # w", "add vertex group mesh.addVertGroup(name) v_group_names = [dd.get(j) for j in xrange(max(dd)+1)] # assign", "and val and (l_ctrl_key_pressed or r_ctrl_key_pressed): begin_import() l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0", "s # set name block_verts = mesh.key.blocks[-1].data # modify mesh with dV #", "'->', I[i] ) if T1: uv1, uv2, uv3 = T1[i] T1[i] = (uv3,", "else: node_ids = set() for j in bone_set: node = transform_tree.get_node(j) assert not", "create mesh # mesh = Blender.Mesh.New(name) mesh.verts.extend(V) mesh.faces.extend(I, ignoreDups=True, smooth=True) # since Blender", "# subroutines def create_mesh(name, V, I, T1, T2): # create mesh # mesh", "SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. #------------------------------------------------------------------------------- from", "20) pos_y-= 20 Draw.BeginAlign() str_cres_filename = Draw.String(\"\", 0x20, 20, pos_y, 300, 20, str_cres_filename.val,", "obtaining a copy # of this software and associated documentation files (the \"Software\"),", "GMDC file\") Draw.PushButton(\"Select file\", 0x11, 320, pos_y, 100, 20, \"Open file browser\") Draw.EndAlign()", "'Not a CRES file!' 
) except: print_last_exception() if not transform_tree: close_log_file() display_menu('Error!', ['Could", "0x33, 220, pos_y, 100, 20, btn_all_bones.val, \"Import all bones/transforms; otherwise, used bones only\")", "and 2 or 1) except: print_last_exception() res = False if not res or", "= create_mesh(group.name, V, I, T1, T2) obj = scene.objects.new(mesh) obj.name = group.name #", "scene mesh = create_mesh(group.name, V, I, T1, T2) obj = scene.objects.new(mesh) obj.name =", "[(S[i], S[j], S[k]) for i, j, k in I] mesh.verts.extend(V) mesh.faces.extend(I) name =", "and id(node) not in node_ids: node_ids.add(id(node)) node = node.parent if node_ids: log( 'Creating", "# begin import # log( '==Geometry Data Container Importer======' ) log( 'GMDC file:',", "sum(v, Vector())*(1./len(v)) if (v and node.bone_index in bone_set) else node.abs_transform.loc # the bone's", "(t[2], t[0], t[1]) log( '--Triangle # %i reordered:' % i, t, '->', I[i]", "30, \"Import geometry (Ctrl + Enter)\") Draw.PushButton(\"Exit\", 0, 220, pos_y, 100, 30, \"Terminate", "mesh object (vertices: %i, triangles: %i)...' % (len(V), len(I)) ) # create mesh", "or 1) if res and res.nodes[0].type == 'cResourceNode': transform_tree = build_transform_tree(res.nodes) else: res", "a different set of inverse transforms. Replace?', ['Yes, replace inverse transforms.', 'No, keep", "scene.objects.new(mesh) obj.name = 'b_mesh' if geometry.dynamic_bmesh: log( 'Creating dynamic bounding mesh...' 
) mesh", "Enter)\") Draw.PushButton(\"Exit\", 0, 220, pos_y, 100, 30, \"Terminate the script (Esc)\") Draw.EndAlign() #---------------------------------------", "= Draw.Create(0) btn_remove_doubles = Draw.Create(1) btn_all_bones = Draw.Create(0) btn_save_log = Draw.Create(0) Draw.Register(draw_gui, event_handler,", "(%i)' % len(node_ids) ) armature = Blender.Armature.New() armature.envelopes = False armature.vertexGroups = True", "if (node.abs_transform.loc-v).len() < 0.025: v = node.abs_transform.loc + node.abs_transform.rot.get_matrix().col(2)*0.05 _bone.tail = BlenderVector(v.to_tuple()) if", "I[i] = (t[2], t[0], t[1]) log( '--Triangle # %i reordered:' % i, t,", "%i, triangles: %i)...' % (len(V), len(I)) ) # create mesh and add it", "False armature.vertexGroups = True armature.drawType = Blender.Armature.STICK arm_obj = scene.objects.new(armature) # create armature", "idx = '#%i'%idx if idx!=None else '' s = name[:30-len(idx)] + idx #", "['Select GMDC file.']) return # create log file (if needed) if _save_log: s", "Name: 'GMDC (.gmdc, .5gd)' Blender: 249 Group: 'Import' Tooltip: 'Import TS2 GMDC file'", "make_unique_bone_name(name, idx, v_group_names) v_group_names.add(name) mesh.addVertGroup(name) mesh.assignVertsToGroup(name, S.values(), 1.0, 1) mesh.calcNormals() v_group_names = None", "'%08X' % group.flags) mesh_objects.append(obj) # save reference to current object log( '--Rigging:', data_group.bones", "s in enumerate(items)), 0x100) b = choice_required and choice < 0 return choice", "part in enumerate(geometry.dynamic_bmesh): if part: V, I = part S = {} #", "recalculates normals, setting original normals is useless # instead, calculate normals mesh.calcNormals() if", "= 'UVMap2' for f, t in zip(mesh.faces, T2): f.uv = tuple(BlenderVector(u, 1-v) for", "dynamic bounding mesh...' ) mesh = Blender.Mesh.New('b_mesh') obj = scene.objects.new(mesh) obj.name = 'b_mesh'", "display_menu('Error!', ['An error has occured. 
See log for details.']) else: # Ok log(", "r_ctrl_key_pressed = 0 def set_gmdc_filename(filename): global gmdc_filename str_gmdc_filename.val = filename def set_cres_filename(filename): global", "envelopes modifier[Blender.Modifier.Settings.OBJECT ] = arm_obj scene.update() #<- end def begin_import(): settings = {", "geometry.index_groups: log( 'Index group \"%s\"' % group.name ) data_group = geometry.data_groups[group.data_group_index] # define", "any person obtaining a copy # of this software and associated documentation files", "= set(chain(*(group.bones or [] for group in geometry.index_groups))) if settings['all_bones']: node_ids = set(map(id,", "select_data(data_group.bones) W = select_data(data_group.weights) log( '--Assigning vertices to vertex groups...' ) # map", "are merged together (removes seams)\") btn_all_bones = Draw.Toggle(\"All bones\", 0x33, 220, pos_y, 100,", "100, 20, btn_all_bones.val, \"Import all bones/transforms; otherwise, used bones only\") btn_save_log = Draw.Toggle(\"Save", "#------------------------------------------------------------------------------- # Copyright (C) 2016 DjAlex88 (https://github.com/djalex88/) # # Permission is hereby granted,", "name[:30-len(s)] + s i+= 1 return s #--------------------------------------- # get active scene scene", "try: res = load_resource(gmdc_filename, _save_log and 2 or 1) except: print_last_exception() res =", "{} # { old_index -> new_index } for i, x in enumerate(sorted(set(chain(*group.indices)))): S[x]", "# GMDC file selector Draw.Label(\"GMDC file\", 20, pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign()", "20, pos_y, 100, 20, btn_import_bmesh.val, \"Import bounding geometry\") btn_remove_doubles = Draw.Toggle(\"Rm. 
doubles\", 0x32,", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "a copy # of this software and associated documentation files (the \"Software\"), to", "0.75, 0.75) Blender.BGL.glRecti(10, 10, 430, pos_y) pos_y-= 30 # plugin's header s =", "log() if _save_log: log( '==SKELETON==============================' ) log( transform_tree ) log() try: if settings['remove_doubles']:", "Blender.Mathutils import Vector as BlenderVector ######################################## ## Importer ######################################## def create_objects(geometry, transform_tree, settings):", "# w = [] for i, t in enumerate(I): if 0 == t[2]:", "for obj in mesh_objects: modifier = obj.modifiers.append(Blender.Modifier.Types.ARMATURE) modifier[Blender.Modifier.Settings.VGROUPS ] = True # use", "then they are merged together (removes seams)\") btn_all_bones = Draw.Toggle(\"All bones\", 0x33, 220,", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "(the \"Software\"), to deal # in the Software without restriction, including without limitation", "s #--------------------------------------- # get active scene scene = bpy.data.scenes.active # # add mesh", "= select_data(data_group.vertices) # texture coords if data_group.tex_coords: T1 = select_data(data_group.tex_coords) T1 = [(T1[i],", "settings): #--------------------------------------- # subroutines def create_mesh(name, V, I, T1, T2): # create mesh", "charge, to any person obtaining a copy # of this software and associated", "def set_gmdc_filename(filename): global gmdc_filename str_gmdc_filename.val = filename def set_cres_filename(filename): global cres_filename str_cres_filename.val =", "(v and node.bone_index in bone_set) else node.abs_transform.loc # the bone's length must not", "t: idx in t[1], enumerate(keys)) if _keys_f: s = '::'.join(s) log( '\\x20\\x20--Key \"%s\"'", "= 1 # return to basis #<- groups # # add bounding geometry", "scene.objects.new(mesh) obj.name = 
group.name # max - 21 characters # save original name", "T2[k]) for i, j, k in I] else: T2 = None else: T1", "has inverse transforms (%i) stored in scene.properties[\"gmdc_inverse_transforms\"]' % (len(w)/7) ) if v !=", "global l_ctrl_key_pressed, r_ctrl_key_pressed if evt == Draw.ESCKEY and val: Draw.Exit() elif evt ==", "T1[k]) for i, j, k in I] if data_group.tex_coords2: T2 = select_data(data_group.tex_coords2) T2", "_bone) ## ## armature, node_ids and bone_set are defined at the bottom def", "limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or", "100, 20, btn_remove_doubles.val, \"If some vertices differ only in texture coordinates, then they", "except: log( 'Saving inverse transforms in scene.properties[\"gmdc_inverse_transforms\"]' ) scene.properties['gmdc_inverse_transforms'] = v # #", "if settings['remove_doubles']: log( 'Removing doubles...' ) geometry.remove_doubles() log() log( 'Creating objects...' ) create_objects(geometry,", "#--------------------------------------- # subroutines def create_mesh(name, V, I, T1, T2): # create mesh #", "# event handlers l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0 def set_gmdc_filename(filename): global gmdc_filename", "node_ids and _n.bone_index in bone_set)] v = sum(v, Vector())*(1./len(v)) if (v and node.bone_index", "# define index mapping S = {} # { old_index -> new_index }", "= scene.objects.new(mesh) obj.name = group.name # max - 21 characters # save original", "to GMDC file\") Draw.PushButton(\"Select file\", 0x11, 320, pos_y, 100, 20, \"Open file browser\")", "in zip(mesh.faces, T2): f.uv = tuple(BlenderVector(u, 1-v) for u, v in t) mesh.activeUVLayer", "from Blender import Draw from Blender.Mathutils import Vector as BlenderVector ######################################## ## Importer", "pos_y-= 30 # GMDC file selector Draw.Label(\"GMDC file\", 20, pos_y, 200, 20) pos_y-=", "+ '.import_log.txt' log( 'Opening log file \"%s\" for writing... 
' % s )", "set name block_verts = mesh.key.blocks[-1].data # modify mesh with dV # for i,", "} j = len(mesh.verts) for i, x in enumerate(sorted(set(chain(*I)))): S[x] = i+j rot,", "% i, t, '->', I[i] ) if T1: uv1, uv2, uv3 = T1[i]", "for j in bone_set: node = transform_tree.get_node(j) assert not isinstance(node, tuple) # include", "file\", 0x11, 320, pos_y, 100, 20, \"Open file browser\") Draw.EndAlign() pos_y-= 30 #", "geometry.index_groups))) if settings['all_bones']: node_ids = set(map(id, transform_tree)) else: node_ids = set() for j", "than 3 different indices): # https://www.blender.org/api/249PythonDoc/Mesh.MFaceSeq-class.html#extend # w = [] for i, t", "S = {} # { old_index -> new_index } for i, x in", "log() log( 'Creating objects...' ) create_objects(geometry, transform_tree, settings) except: print_last_exception() display_menu('Error!', ['An error", "data_group.tex_coords: T1 = select_data(data_group.tex_coords) T1 = [(T1[i], T1[j], T1[k]) for i, j, k", "and _n.bone_index in bone_set)] v = sum(v, Vector())*(1./len(v)) if (v and node.bone_index in", ") close_log_file() display_menu('Error!', ['Could not load geometry file. See log for details.']) return", "mesh.calcNormals() if T1: mesh.addUVLayer('UVMap') # assign texture coords # for f, t in", "add bones armature.makeEditable() add_bones_to_armature(transform_tree.root_nodes) armature.update() log( '--Adding armature modifier(s)...' 
) # assign armature", "idx!=None else '' s = name[:30-len(idx)] + idx # max - 31 characters", "elif evt == 0x21: Blender.Window.FileSelector(set_cres_filename, 'Select') #------------------------------------------------------------------------------- # set default values for gui", "and flags assert type(group.name) == str obj.addProperty('name', group.name) # Blender does not like", "or 'no' ) # rigging # if data_group.bones: B = select_data(data_group.bones) W =", "node_ids = set() for j in bone_set: node = transform_tree.get_node(j) assert not isinstance(node,", "filename def set_cres_filename(filename): global cres_filename str_cres_filename.val = filename def event_handler(evt, val): global l_ctrl_key_pressed,", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "'--Remove doubles: ', settings['remove_doubles'] ) log( '--Import all bones: ', settings['all_bones'] ) log()", "display_menu(\"Import complete!\", ['Quit']) == 0: Draw.Exit() finally: close_log_file() ######################################## ## GUI ######################################## def", "} _save_log = bool(btn_save_log.val) gmdc_filename = str_gmdc_filename.val.strip() cres_filename = str_cres_filename.val.strip() if not gmdc_filename:", "j, k in group.indices] # filtering function def select_data(data): return [x for i,", "modifier = obj.modifiers.append(Blender.Modifier.Types.ARMATURE) modifier[Blender.Modifier.Settings.VGROUPS ] = True # use vertex groups modifier[Blender.Modifier.Settings.ENVELOPES] =", "log( '--Rigging:', data_group.bones and 'yes' or 'no' ) # rigging # if data_group.bones:", "isinstance(node, tuple) # include all nodes down to root while node and id(node)", "groups # # add bounding geometry # if settings['import_bmesh']: if geometry.static_bmesh: log( 'Creating", "and error( 'Not a GMDC file!' 
) close_log_file() display_menu('Error!', ['Could not load geometry", "as arithmetic mean v = [_n.abs_transform.loc for _n in node.child_nodes if (id(_n) in", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE", "not load resource node file. See log for details.']) return log() if _save_log:", "for i, j, k in group.indices] # filtering function def select_data(data): return [x", "Draw.Toggle(\"Rm. doubles\", 0x32, 120, pos_y, 100, 20, btn_remove_doubles.val, \"If some vertices differ only", "_save_log: s = gmdc_filename + '.import_log.txt' log( 'Opening log file \"%s\" for writing...", "v_group_names.add(name) mesh.addVertGroup(name) mesh.assignVertsToGroup(name, S.values(), 1.0, 1) mesh.calcNormals() v_group_names = None mesh_objects.append(obj) # #", "DjAlex88 (https://github.com/djalex88/) # # Permission is hereby granted, free of charge, to any", "# for f, t in zip(mesh.faces, T1): f.uv = tuple(BlenderVector(u, 1-v) for u,", "(if needed) if _save_log: s = gmdc_filename + '.import_log.txt' log( 'Opening log file", "and choice < 0 return choice def draw_gui(): global str_gmdc_filename, str_cres_filename, btn_import_bmesh, btn_all_bones,", "# set name block_verts = mesh.key.blocks[-1].data # modify mesh with dV # for", "', settings['all_bones'] ) log() # load geometry log( 'Opening GMDC file \"%s\"...' 
%", "20, btn_all_bones.val, \"Import all bones/transforms; otherwise, used bones only\") btn_save_log = Draw.Toggle(\"Save log\",", "r_ctrl_key_pressed = val elif evt == Draw.RETKEY and val and (l_ctrl_key_pressed or r_ctrl_key_pressed):", "%i)' % tuple(map(len, dV)) ) # basis obj.insertShapeKey() for idx, s in enumerate(geometry.morph_names):", "return geometry = res.nodes[0].geometry log() transform_tree = None if cres_filename: # load skeleton", "# # Permission is hereby granted, free of charge, to any person obtaining", "TS2 GMDC file' \"\"\" #------------------------------------------------------------------------------- # Copyright (C) 2016 DjAlex88 (https://github.com/djalex88/) # #", "global str_gmdc_filename, str_cres_filename, btn_import_bmesh, btn_all_bones, btn_remove_doubles, btn_save_log pos_y = 230 ; MAX_PATH =", "= Blender.Mesh.New('b_mesh') obj = scene.objects.new(mesh) obj.name = 'b_mesh' v_group_names = set() for idx,", "except: print_last_exception() display_menu('Error!', ['An error has occured. See log for details.']) else: #", "# exit prompt if display_menu(\"Import complete!\", ['Quit']) == 0: Draw.Exit() finally: close_log_file() ########################################", "'.import_log.txt' log( 'Opening log file \"%s\" for writing... ' % s ) try:", "3rd position # as well as degenerate triangles (i.e., less than 3 different", "else: res and error( 'Not a CRES file!' ) except: print_last_exception() if not", "not in node_ids: node_ids.add(id(node)) node = node.parent if node_ids: log( 'Creating armature...' 
)", "# create mesh # mesh = Blender.Mesh.New(name) mesh.verts.extend(V) mesh.faces.extend(I, ignoreDups=True, smooth=True) # since", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "str_cres_filename = Draw.Create(\"\") btn_import_bmesh = Draw.Create(0) btn_remove_doubles = Draw.Create(1) btn_all_bones = Draw.Create(0) btn_save_log", "20, btn_remove_doubles.val, \"If some vertices differ only in texture coordinates, then they are", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "not be 0, otherwise Blender ignores it if (node.abs_transform.loc-v).len() < 0.025: v =", "idx, part in enumerate(geometry.dynamic_bmesh): if part: V, I = part S = {}", "(https://github.com/djalex88/) # # Permission is hereby granted, free of charge, to any person", "the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "create log file (if needed) if _save_log: s = gmdc_filename + '.import_log.txt' log(", "file\", 20, pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign() str_gmdc_filename = Draw.String(\"\", 0x10, 20,", "0 == t[2]: I[i] = (t[2], t[0], t[1]) log( '--Triangle # %i reordered:'", "I, T1, T2) obj = scene.objects.new(mesh) obj.name = group.name # max - 21", "evt == 0x21: Blender.Window.FileSelector(set_cres_filename, 'Select') #------------------------------------------------------------------------------- # set default values for gui elements", "BlenderVector ######################################## ## Importer ######################################## def create_objects(geometry, transform_tree, settings): #--------------------------------------- # subroutines def", "of the Software, and to permit persons to whom the Software is #", "= (t[2], t[0], t[1]) log( '--Triangle # %i reordered:' % i, t, '->',", "triangles (i.e., less than 3 different indices): # 
https://www.blender.org/api/249PythonDoc/Mesh.MFaceSeq-class.html#extend # w = []", "in b) for b in B] dd = dict() # { index ->", "select_data(data_group.keys) dV = map(select_data, data_group.dVerts) log( '\\x20\\x20--Length of dV: (%i, %i, %i, %i)'", "Blender.Armature.New() armature.envelopes = False armature.vertexGroups = True armature.drawType = Blender.Armature.STICK arm_obj = scene.objects.new(armature)", "% cres_filename ) try: res = load_resource(cres_filename, _save_log and 2 or 1) if", "save reference to current object log( '--Rigging:', data_group.bones and 'yes' or 'no' )", "0x20, 20, pos_y, 300, 20, str_cres_filename.val, MAX_PATH, \"Path to resource node file (CRES;", "elif evt == Draw.RIGHTCTRLKEY: r_ctrl_key_pressed = val elif evt == Draw.RETKEY and val", "btn_import_bmesh.val, \"Import bounding geometry\") btn_remove_doubles = Draw.Toggle(\"Rm. doubles\", 0x32, 120, pos_y, 100, 20,", "= {} # { old_index -> new_index } j = len(mesh.verts) for i,", "or 'bone' dd[idx] = name = make_unique_bone_name(name, idx, dd.values()) # add vertex group", "values for gui elements and run event loop str_gmdc_filename = Draw.Create(\"\") str_cres_filename =", "inverse transforms (%i) stored in scene.properties[\"gmdc_inverse_transforms\"]' % (len(w)/7) ) if v != w", "if parent_bone: _bone.parent = parent_bone name = make_unique_bone_name(node.name, node.bone_index, armature.bones.keys()) # add bone", "group mesh.addVertGroup(name) v_group_names = [dd.get(j) for j in xrange(max(dd)+1)] # assign vertices for", "Draw.Exit() elif evt == Draw. LEFTCTRLKEY: l_ctrl_key_pressed = val elif evt == Draw.RIGHTCTRLKEY:", "dV: (%i, %i, %i, %i)' % tuple(map(len, dV)) ) # basis obj.insertShapeKey() for", "S[j], S[k]) for i, j, k in group.indices] # filtering function def select_data(data):", "display_menu('Error!', ['Could not load geometry file. 
See log for details.']) return geometry =", "scene.properties['gmdc_inverse_transforms'] = v # # add armature (if any) # if transform_tree: bone_set", "= _bone add_bones_to_armature(node.child_nodes, _bone) ## ## armature, node_ids and bone_set are defined at", "including without limitation the rights # to use, copy, modify, merge, publish, distribute,", "s = '.%i'%i + idx s = name[:30-len(s)] + s i+= 1 return", "obj.addProperty('flags', '%08X' % group.flags) mesh_objects.append(obj) # save reference to current object log( '--Rigging:',", "if T1: uv1, uv2, uv3 = T1[i] T1[i] = (uv3, uv1, uv2) if", "0.8, 0.8) Blender.BGL.glRecti(10, pos_y, 430, pos_y+30) Draw.Label(s, 20, pos_y, 400, 30) pos_y-= 30", "20, str_gmdc_filename.val, MAX_PATH, \"Path to GMDC file\") Draw.PushButton(\"Select file\", 0x11, 320, pos_y, 100,", "recommended)\") Draw.PushButton(\"Select file\", 0x21, 320, pos_y, 100, 20, \"Open file browser\") Draw.EndAlign() pos_y-=", "[] for group in geometry.index_groups: log( 'Index group \"%s\"' % group.name ) data_group", "set default values for gui elements and run event loop str_gmdc_filename = Draw.Create(\"\")", "val: Draw.Exit() elif evt == Draw. LEFTCTRLKEY: l_ctrl_key_pressed = val elif evt ==", "writing... ' % s ) try: f = open(s, 'w') except IOError as", "occured. See log for details.']) else: # Ok log( 'Finished!' ) Blender.Redraw() #", "OTHER DEALINGS IN # THE SOFTWARE. #------------------------------------------------------------------------------- from gmdc_tools import * from itertools", "obj = scene.objects.new(mesh) obj.name = group.name # max - 21 characters # save", "if geometry.dynamic_bmesh: log( 'Creating dynamic bounding mesh...' 
) mesh = Blender.Mesh.New('b_mesh') obj =", "w) in enumerate(zip(B, W)): for wi, j in enumerate(b): if wi == 3:", "Draw.EndAlign() pos_y-= 45 # buttons Draw.BeginAlign() Draw.PushButton(\"Import\", 1, 120, pos_y, 100, 30, \"Import", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "from gmdc_tools import * from itertools import chain import bpy, Blender from Blender", "Blender import Draw from Blender.Mathutils import Vector as BlenderVector ######################################## ## Importer ########################################", "Draw.Exit() finally: close_log_file() ######################################## ## GUI ######################################## def display_menu(caption, items, choice_required=False): b =", "% len(node_ids) ) armature = Blender.Armature.New() armature.envelopes = False armature.vertexGroups = True armature.drawType", "V = select_data(data_group.vertices) # texture coords if data_group.tex_coords: T1 = select_data(data_group.tex_coords) T1 =", "= Blender.Mesh.New('b_mesh') mesh.verts.extend(V) mesh.faces.extend(I) obj = scene.objects.new(mesh) obj.name = 'b_mesh' if geometry.dynamic_bmesh: log(", "not load geometry file. See log for details.']) return geometry = res.nodes[0].geometry log()", "in S] I = [(S[i], S[j], S[k]) for i, j, k in I]", "does not like Unicode here obj.addProperty('flags', '%08X' % group.flags) mesh_objects.append(obj) # save reference", "r_ctrl_key_pressed): begin_import() l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0 def button_events(evt): if evt ==", "in enumerate(geometry.morph_names): _keys_f = filter(lambda t: idx in t[1], enumerate(keys)) if _keys_f: s", "Blender.Armature.Editbone() _bone.head = BlenderVector(node.abs_transform.loc.to_tuple()) # compute tail pos as arithmetic mean v =", "'Finished!' 
) Blender.Redraw() # exit prompt if display_menu(\"Import complete!\", ['Quit']) == 0: Draw.Exit()", "to basis #<- groups # # add bounding geometry # if settings['import_bmesh']: if", "20, str_cres_filename.val, MAX_PATH, \"Path to resource node file (CRES; optional, but recommended)\") Draw.PushButton(\"Select", "in enumerate(I): if 0 == t[2]: I[i] = (t[2], t[0], t[1]) log( '--Triangle", "= '.%i'%i + idx s = name[:30-len(s)] + s i+= 1 return s", "the Software is # furnished to do so, subject to the following conditions:", "subject to the following conditions: # # The above copyright notice and this", "be 0, otherwise Blender ignores it if (node.abs_transform.loc-v).len() < 0.025: v = node.abs_transform.loc", "220, pos_y, 100, 20, btn_all_bones.val, \"Import all bones/transforms; otherwise, used bones only\") btn_save_log", "mesh.key.blocks[-1].data # modify mesh with dV # for i, key in _keys_f: j", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "v != w and display_menu('The file has a different set of inverse transforms.", "# basis obj.insertShapeKey() for idx, s in enumerate(geometry.morph_names): _keys_f = filter(lambda t: idx", "w.append(i) log( '--Triangle # %i' % i, t, 'removed' ) for i in", "Draw.String(\"\", 0x10, 20, pos_y, 300, 20, str_gmdc_filename.val, MAX_PATH, \"Path to GMDC file\") Draw.PushButton(\"Select", "btn_all_bones = Draw.Toggle(\"All bones\", 0x33, 220, pos_y, 100, 20, btn_all_bones.val, \"Import all bones/transforms;", "mesh.addVertGroup(name) mesh.assignVertsToGroup(name, S.values(), 1.0, 1) mesh.calcNormals() v_group_names = None mesh_objects.append(obj) # # load", "31 characters (?) 
i = 1 while s in collection: s = '.%i'%i", "node file (optional)\", 20, pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign() str_cres_filename = Draw.String(\"\",", "resource node file (CRES; optional, but recommended)\") Draw.PushButton(\"Select file\", 0x21, 320, pos_y, 100,", "vertices differ only in texture coordinates, then they are merged together (removes seams)\")", "'--Import all bones: ', settings['all_bones'] ) log() # load geometry log( 'Opening GMDC", "complete!\", ['Quit']) == 0: Draw.Exit() finally: close_log_file() ######################################## ## GUI ######################################## def display_menu(caption,", "# if transform_tree: bone_set = set(chain(*(group.bones or [] for group in geometry.index_groups))) if", "T2 = select_data(data_group.tex_coords2) T2 = [(T2[i], T2[j], T2[k]) for i, j, k in", "print_last_exception() display_menu('Error!', ['An error has occured. See log for details.']) else: # Ok", "120, pos_y, 100, 20, btn_remove_doubles.val, \"If some vertices differ only in texture coordinates,", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "transforms (if any) # if geometry.inverse_transforms: v = tuple(chain(*chain(*geometry.inverse_transforms))) try: w = tuple(scene.properties['gmdc_inverse_transforms'])", "# # load inverse transforms (if any) # if geometry.inverse_transforms: v = tuple(chain(*chain(*geometry.inverse_transforms)))", "3 different indices): # https://www.blender.org/api/249PythonDoc/Mesh.MFaceSeq-class.html#extend # w = [] for i, t in", "i # map indices I = [(S[i], S[j], S[k]) for i, j, k", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "copy or None T2 = group.tex_coords2 and group.tex_coords2[:] # also, Blender does not", "res and res.nodes[0].type == 'cResourceNode': transform_tree = build_transform_tree(res.nodes) else: res and error( 'Not", "I] mesh.verts.extend(V) mesh.faces.extend(I) name = transform_tree and 
transform_tree.get_node(idx).name or 'bone' name = make_unique_bone_name(name,", "prompt if display_menu(\"Import complete!\", ['Quit']) == 0: Draw.Exit() finally: close_log_file() ######################################## ## GUI", "collection: s = '.%i'%i + idx s = name[:30-len(s)] + s i+= 1", "if v != w and display_menu('The file has a different set of inverse", "j, k in I] if data_group.tex_coords2: T2 = select_data(data_group.tex_coords2) T2 = [(T2[i], T2[j],", "name = transform_tree and transform_tree.get_node(idx).name or 'bone' name = make_unique_bone_name(name, idx, v_group_names) v_group_names.add(name)", "is hereby granted, free of charge, to any person obtaining a copy #", "for i, t in enumerate(I): if 0 == t[2]: I[i] = (t[2], t[0],", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "T1): f.uv = tuple(BlenderVector(u, 1-v) for u, v in t) # Direct3D ->", "return to basis #<- groups # # add bounding geometry # if settings['import_bmesh']:", "else node.abs_transform.loc # the bone's length must not be 0, otherwise Blender ignores", "geometry.static_bmesh: log( 'Creating static bounding mesh...' 
) V, I = geometry.static_bmesh mesh =", "bool(btn_save_log.val) gmdc_filename = str_gmdc_filename.val.strip() cres_filename = str_cres_filename.val.strip() if not gmdc_filename: display_menu('Error!', ['Select GMDC", "log( 'Settings:' ) log( '--Import bounding geometry:', settings['import_bmesh'] ) log( '--Remove doubles: ',", "btn_import_bmesh = Draw.Create(0) btn_remove_doubles = Draw.Create(1) btn_all_bones = Draw.Create(0) btn_save_log = Draw.Create(0) Draw.Register(draw_gui,", "name = make_unique_bone_name(node.name, node.bone_index, armature.bones.keys()) # add bone and its children armature.bones[name] =", "_bone = Blender.Armature.Editbone() _bone.head = BlenderVector(node.abs_transform.loc.to_tuple()) # compute tail pos as arithmetic mean", "# also, Blender does not like triangles with zero-index vertex on 3rd position", "r_ctrl_key_pressed = 0 def button_events(evt): if evt == 0: Draw.Exit() elif evt ==", "gmdc_tools import * from itertools import chain import bpy, Blender from Blender import", "= select_data(data_group.tex_coords) T1 = [(T1[i], T1[j], T1[k]) for i, j, k in I]", "try: res = load_resource(cres_filename, _save_log and 2 or 1) if res and res.nodes[0].type", "so, subject to the following conditions: # # The above copyright notice and", "add bounding geometry # if settings['import_bmesh']: if geometry.static_bmesh: log( 'Creating static bounding mesh...'", "= T1[i] T1[i] = (uv3, uv1, uv2) if T2: uv1, uv2, uv3 =", "bounding geometry\") btn_remove_doubles = Draw.Toggle(\"Rm. 
doubles\", 0x32, 120, pos_y, 100, 20, btn_remove_doubles.val, \"If", "# compute tail pos as arithmetic mean v = [_n.abs_transform.loc for _n in", "I = part S = {} # { old_index -> new_index } j", ") scene.properties['gmdc_inverse_transforms'] = v # # add armature (if any) # if transform_tree:", "copy # of this software and associated documentation files (the \"Software\"), to deal", ") V, I = geometry.static_bmesh mesh = Blender.Mesh.New('b_mesh') mesh.verts.extend(V) mesh.faces.extend(I) obj = scene.objects.new(mesh)", "# add mesh objects (main geometry) # mesh_objects = [] for group in", "T1 = group.tex_coords and group.tex_coords[:] # copy or None T2 = group.tex_coords2 and", "T1[i] T1[i] = (uv3, uv1, uv2) if T2: uv1, uv2, uv3 = T2[i]", "log( '--Number of transform nodes (%i)' % len(node_ids) ) armature = Blender.Armature.New() armature.envelopes", "# Blender does not like Unicode here obj.addProperty('flags', '%08X' % group.flags) mesh_objects.append(obj) #", "Draw.PushButton(\"Import\", 1, 120, pos_y, 100, 30, \"Import geometry (Ctrl + Enter)\") Draw.PushButton(\"Exit\", 0,", "log( '--Remove doubles: ', settings['remove_doubles'] ) log( '--Import all bones: ', settings['all_bones'] )", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
#-------------------------------------------------------------------------------", "log( 'Scene already has inverse transforms (%i) stored in scene.properties[\"gmdc_inverse_transforms\"]' % (len(w)/7) )", "uv2, uv3 = T1[i] T1[i] = (uv3, uv1, uv2) if T2: uv1, uv2,", "pos_y, 300, 20, str_gmdc_filename.val, MAX_PATH, \"Path to GMDC file\") Draw.PushButton(\"Select file\", 0x11, 320,", "gui elements and run event loop str_gmdc_filename = Draw.Create(\"\") str_cres_filename = Draw.Create(\"\") btn_import_bmesh", "'--Number of transform nodes (%i)' % len(node_ids) ) armature = Blender.Armature.New() armature.envelopes =", "= False armature.vertexGroups = True armature.drawType = Blender.Armature.STICK arm_obj = scene.objects.new(armature) # create", "choice def draw_gui(): global str_gmdc_filename, str_cres_filename, btn_import_bmesh, btn_all_bones, btn_remove_doubles, btn_save_log pos_y = 230", "modify mesh with dV # for i, key in _keys_f: j = key.index(idx)", "(%i) stored in scene.properties[\"gmdc_inverse_transforms\"]' % (len(w)/7) ) if v != w and display_menu('The", "sublicense, and/or sell # copies of the Software, and to permit persons to", "and run event loop str_gmdc_filename = Draw.Create(\"\") str_cres_filename = Draw.Create(\"\") btn_import_bmesh = Draw.Create(0)", "_save_log and 2 or 1) if res and res.nodes[0].type == 'cResourceNode': transform_tree =", "V, I, T1, T2) obj = scene.objects.new(mesh) obj.name = group.name # max -", "v_group_names = dd = None # shape keys # if data_group.keys: log( '--Adding", "Draw.RIGHTCTRLKEY: r_ctrl_key_pressed = val elif evt == Draw.RETKEY and val and (l_ctrl_key_pressed or", "and node.bone_index in bone_set) else node.abs_transform.loc # the bone's length must not be", "= set() for j in bone_set: node = transform_tree.get_node(j) assert not isinstance(node, tuple)", "def event_handler(evt, val): global l_ctrl_key_pressed, r_ctrl_key_pressed if evt == Draw.ESCKEY and val: Draw.Exit()", "not envelopes 
modifier[Blender.Modifier.Settings.OBJECT ] = arm_obj scene.update() #<- end def begin_import(): settings =", "dict() # { index -> unique_bone_name } for idx in group.bones: name =", "script's log data into file *.import_log.txt\") Draw.EndAlign() pos_y-= 45 # buttons Draw.BeginAlign() Draw.PushButton(\"Import\",", "def display_menu(caption, items, choice_required=False): b = True while b: choice = Draw.PupMenu('%s%%t|'%caption +", "armature.makeEditable() add_bones_to_armature(transform_tree.root_nodes) armature.update() log( '--Adding armature modifier(s)...' ) # assign armature modifier #", "T2: del T2[i] w = None log( '--Creating mesh object (vertices: %i, triangles:", "name = make_unique_bone_name(name, idx, dd.values()) # add vertex group mesh.addVertGroup(name) v_group_names = [dd.get(j)", "== 0: raise Exception() except: log( 'Saving inverse transforms in scene.properties[\"gmdc_inverse_transforms\"]' ) scene.properties['gmdc_inverse_transforms']", "save original name and flags assert type(group.name) == str obj.addProperty('name', group.name) # Blender", "armature.update() log( '--Adding armature modifier(s)...' ) # assign armature modifier # for obj", "'Opening GMDC file \"%s\"...' % gmdc_filename ) try: res = load_resource(gmdc_filename, _save_log and", "', settings['remove_doubles'] ) log( '--Import all bones: ', settings['all_bones'] ) log() # load", "x in enumerate(data) if i in S] V = select_data(data_group.vertices) # texture coords", "res = load_resource(gmdc_filename, _save_log and 2 or 1) except: print_last_exception() res = False", "(len(w)/7) ) if v != w and display_menu('The file has a different set", "s = gmdc_filename + '.import_log.txt' log( 'Opening log file \"%s\" for writing... 
'", "(l_ctrl_key_pressed or r_ctrl_key_pressed): begin_import() l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0 def button_events(evt): if", "# copies of the Software, and to permit persons to whom the Software", "useless # instead, calculate normals mesh.calcNormals() if T1: mesh.addUVLayer('UVMap') # assign texture coords", "node_ids: log( 'Creating armature...' ) log( '--Number of transform nodes (%i)' % len(node_ids)", "loaded inverse transforms.'], choice_required=True) == 0: raise Exception() except: log( 'Saving inverse transforms", "def add_bones_to_armature(transform_nodes, parent_bone=None): for node in transform_nodes: if id(node) in node_ids: _bone =", "as well as degenerate triangles (i.e., less than 3 different indices): # https://www.blender.org/api/249PythonDoc/Mesh.MFaceSeq-class.html#extend", "20, pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign() str_gmdc_filename = Draw.String(\"\", 0x10, 20, pos_y,", "IN # THE SOFTWARE. #------------------------------------------------------------------------------- from gmdc_tools import * from itertools import chain", "with dV # for i, key in _keys_f: j = key.index(idx) v =", "this permission notice shall be included in # all copies or substantial portions", "Copyright (C) 2016 DjAlex88 (https://github.com/djalex88/) # # Permission is hereby granted, free of", "in _keys_f: j = key.index(idx) v = dV[j] if v: block_verts[i]+= BlenderVector(*v[i]) obj.activeShape", "modifier # for obj in mesh_objects: modifier = obj.modifiers.append(Blender.Modifier.Types.ARMATURE) modifier[Blender.Modifier.Settings.VGROUPS ] = True", "= filename def event_handler(evt, val): global l_ctrl_key_pressed, r_ctrl_key_pressed if evt == Draw.ESCKEY and", "else: T1 = group.tex_coords and group.tex_coords[:] # copy or None T2 = group.tex_coords2", "characters # save original name and flags assert type(group.name) == str obj.addProperty('name', group.name)", "map bones B = [tuple(group.bones[j] for j in b) for b in B]", "node_ids.add(id(node)) 
node = node.parent if node_ids: log( 'Creating armature...' ) log( '--Number of", "for idx, s in enumerate(geometry.morph_names): _keys_f = filter(lambda t: idx in t[1], enumerate(keys))", "any) # if geometry.inverse_transforms: v = tuple(chain(*chain(*geometry.inverse_transforms))) try: w = tuple(scene.properties['gmdc_inverse_transforms']) log( 'Scene", "global cres_filename str_cres_filename.val = filename def event_handler(evt, val): global l_ctrl_key_pressed, r_ctrl_key_pressed if evt", "dV # for i, key in _keys_f: j = key.index(idx) v = dV[j]", "# as well as degenerate triangles (i.e., less than 3 different indices): #", "build_transform_tree(res.nodes) else: res and error( 'Not a CRES file!' ) except: print_last_exception() if", "Draw.Toggle(\"Bound. mesh\", 0x31, 20, pos_y, 100, 20, btn_import_bmesh.val, \"Import bounding geometry\") btn_remove_doubles =", "= True # use vertex groups modifier[Blender.Modifier.Settings.ENVELOPES] = False # not envelopes modifier[Blender.Modifier.Settings.OBJECT", "# add armature (if any) # if transform_tree: bone_set = set(chain(*(group.bones or []", "# https://www.blender.org/api/249PythonDoc/Mesh.MFaceSeq-class.html#extend # w = [] for i, t in enumerate(I): if 0", "30) pos_y-= 30 # GMDC file selector Draw.Label(\"GMDC file\", 20, pos_y, 200, 20)", "MAX_PATH, \"Path to GMDC file\") Draw.PushButton(\"Select file\", 0x11, 320, pos_y, 100, 20, \"Open", "index mapping S = {} # { old_index -> new_index } for i,", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "1: begin_import() elif evt == 0x11: Blender.Window.FileSelector(set_gmdc_filename, 'Select') elif evt == 0x21: Blender.Window.FileSelector(set_cres_filename,", "merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to", "armature.vertexGroups = True armature.drawType = Blender.Armature.STICK arm_obj = scene.objects.new(armature) # create armature object", "# if geometry.inverse_transforms: v = 
tuple(chain(*chain(*geometry.inverse_transforms))) try: w = tuple(scene.properties['gmdc_inverse_transforms']) log( 'Scene already", "loc = geometry.inverse_transforms[idx] t = Transform(loc, rot).get_inverse() V = [t.transformPoint(Vector(*x)).to_tuple() for i, x", "define index mapping S = {} # { old_index -> new_index } for", "I = geometry.static_bmesh mesh = Blender.Mesh.New('b_mesh') mesh.verts.extend(V) mesh.faces.extend(I) obj = scene.objects.new(mesh) obj.name =", "pos_y, 100, 20, btn_all_bones.val, \"Import all bones/transforms; otherwise, used bones only\") btn_save_log =", "bone_set: node = transform_tree.get_node(j) assert not isinstance(node, tuple) # include all nodes down", "scene.properties[\"gmdc_inverse_transforms\"]' % (len(w)/7) ) if v != w and display_menu('The file has a", "= scene.objects.new(armature) # create armature object arm_obj.drawMode |= Blender.Object.DrawModes.XRAY # add bones armature.makeEditable()", "mesh and add it to the scene mesh = create_mesh(group.name, V, I, T1,", "def begin_import(): settings = { 'import_bmesh': btn_import_bmesh.val, 'remove_doubles': btn_remove_doubles.val, 'all_bones': btn_all_bones.val, } _save_log", "I[i] if T1: del T1[i] if T2: del T2[i] w = None log(", "= [t.transformPoint(Vector(*x)).to_tuple() for i, x in enumerate(V) if i in S] I =", "basis obj.insertShapeKey() for idx, s in enumerate(geometry.morph_names): _keys_f = filter(lambda t: idx in", "1 while s in collection: s = '.%i'%i + idx s = name[:30-len(s)]", "= 1 while s in collection: s = '.%i'%i + idx s =", "load inverse transforms (if any) # if geometry.inverse_transforms: v = tuple(chain(*chain(*geometry.inverse_transforms))) try: w", "Blender does not like Unicode here obj.addProperty('flags', '%08X' % group.flags) mesh_objects.append(obj) # save", "'--Import bounding geometry:', settings['import_bmesh'] ) log( '--Remove doubles: ', settings['remove_doubles'] ) log( '--Import", "print_last_exception() res = False if not res or 
res.nodes[0].type != 'cGeometryDataContainer': res and", "keys = select_data(data_group.keys) dV = map(select_data, data_group.dVerts) log( '\\x20\\x20--Length of dV: (%i, %i,", "if 0 == t[2]: I[i] = (t[2], t[0], t[1]) log( '--Triangle # %i", "itertools import chain import bpy, Blender from Blender import Draw from Blender.Mathutils import", "for details.']) return geometry = res.nodes[0].geometry log() transform_tree = None if cres_filename: #", ") armature = Blender.Armature.New() armature.envelopes = False armature.vertexGroups = True armature.drawType = Blender.Armature.STICK", "details.']) return log() if _save_log: log( '==SKELETON==============================' ) log( transform_tree ) log() try:", "whom the Software is # furnished to do so, subject to the following", "= True while b: choice = Draw.PupMenu('%s%%t|'%caption + \"|\".join('%s%%x%i'%(s, i) for i, s", "0.8) Blender.BGL.glRecti(10, pos_y, 430, pos_y+30) Draw.Label(s, 20, pos_y, 400, 30) pos_y-= 30 #", "= Blender.Mesh.New(name) mesh.verts.extend(V) mesh.faces.extend(I, ignoreDups=True, smooth=True) # since Blender recalculates normals, setting original", "shape keys # if data_group.keys: log( '--Adding shape keys...' ) keys = select_data(data_group.keys)", "in geometry.index_groups: log( 'Index group \"%s\"' % group.name ) data_group = geometry.data_groups[group.data_group_index] #", "= gmdc_filename + '.import_log.txt' log( 'Opening log file \"%s\" for writing... 
' %", "#!BPY \"\"\" Name: 'GMDC (.gmdc, .5gd)' Blender: 249 Group: 'Import' Tooltip: 'Import TS2", "-> OpenGL if T2: mesh.addUVLayer('UVMap2') mesh.activeUVLayer = 'UVMap2' for f, t in zip(mesh.faces,", "display_menu(caption, items, choice_required=False): b = True while b: choice = Draw.PupMenu('%s%%t|'%caption + \"|\".join('%s%%x%i'%(s,", "Data Container Importer======' ) log( 'GMDC file:', gmdc_filename ) log( 'CRES file:', cres_filename", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software,", "Importer======' ) log( 'GMDC file:', gmdc_filename ) log( 'CRES file:', cres_filename ) log(", "obj.modifiers.append(Blender.Modifier.Types.ARMATURE) modifier[Blender.Modifier.Settings.VGROUPS ] = True # use vertex groups modifier[Blender.Modifier.Settings.ENVELOPES] = False #", "# instead, calculate normals mesh.calcNormals() if T1: mesh.addUVLayer('UVMap') # assign texture coords #", "= filter(lambda t: idx in t[1], enumerate(keys)) if _keys_f: s = '::'.join(s) log(", "error( 'Not a GMDC file!' ) close_log_file() display_menu('Error!', ['Could not load geometry file.", "is # furnished to do so, subject to the following conditions: # #", "# max - 31 characters (?) i = 1 while s in collection:", "= 0 r_ctrl_key_pressed = 0 def button_events(evt): if evt == 0: Draw.Exit() elif", "[x for i, x in enumerate(data) if i in S] V = select_data(data_group.vertices)", "CRES file!' 
) except: print_last_exception() if not transform_tree: close_log_file() display_menu('Error!', ['Could not load", "armature.drawType = Blender.Armature.STICK arm_obj = scene.objects.new(armature) # create armature object arm_obj.drawMode |= Blender.Object.DrawModes.XRAY", "(node.abs_transform.loc-v).len() < 0.025: v = node.abs_transform.loc + node.abs_transform.rot.get_matrix().col(2)*0.05 _bone.tail = BlenderVector(v.to_tuple()) if parent_bone:", "or 'bone' name = make_unique_bone_name(name, idx, v_group_names) v_group_names.add(name) mesh.addVertGroup(name) mesh.assignVertsToGroup(name, S.values(), 1.0, 1)", "'Creating armature...' ) log( '--Number of transform nodes (%i)' % len(node_ids) ) armature", "0 def button_events(evt): if evt == 0: Draw.Exit() elif evt == 1: begin_import()", "1.0 - sum(w) else: f = w[wi] mesh.assignVertsToGroup(v_group_names[j], [i], f, 1) # 1", "Container Importer======' ) log( 'GMDC file:', gmdc_filename ) log( 'CRES file:', cres_filename )", "optional, but recommended)\") Draw.PushButton(\"Select file\", 0x21, 320, pos_y, 100, 20, \"Open file browser\")", "if res and res.nodes[0].type == 'cResourceNode': transform_tree = build_transform_tree(res.nodes) else: res and error(", "\"\"\" #------------------------------------------------------------------------------- # Copyright (C) 2016 DjAlex88 (https://github.com/djalex88/) # # Permission is hereby", "200, 20) pos_y-= 20 Draw.BeginAlign() str_gmdc_filename = Draw.String(\"\", 0x10, 20, pos_y, 300, 20,", "s = name[:30-len(idx)] + idx # max - 31 characters (?) i =", ") log() # load geometry log( 'Opening GMDC file \"%s\"...' 
% gmdc_filename )", "# buttons Draw.BeginAlign() Draw.PushButton(\"Import\", 1, 120, pos_y, 100, 30, \"Import geometry (Ctrl +", "= 0 r_ctrl_key_pressed = 0 def set_gmdc_filename(filename): global gmdc_filename str_gmdc_filename.val = filename def", "node in transform_nodes: if id(node) in node_ids: _bone = Blender.Armature.Editbone() _bone.head = BlenderVector(node.abs_transform.loc.to_tuple())", "= group.tex_coords2 and group.tex_coords2[:] # also, Blender does not like triangles with zero-index", "OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE", "i+= 1 return s #--------------------------------------- # get active scene scene = bpy.data.scenes.active #", "f, t in zip(mesh.faces, T1): f.uv = tuple(BlenderVector(u, 1-v) for u, v in", "scene = bpy.data.scenes.active # # add mesh objects (main geometry) # mesh_objects =", "transform_tree: close_log_file() display_menu('Error!', ['Could not load resource node file. See log for details.'])", "= tuple(scene.properties['gmdc_inverse_transforms']) log( 'Scene already has inverse transforms (%i) stored in scene.properties[\"gmdc_inverse_transforms\"]' %", "defined at the bottom def make_unique_bone_name(name, idx, collection): idx = '#%i'%idx if idx!=None", "name = make_unique_bone_name(name, idx, v_group_names) v_group_names.add(name) mesh.addVertGroup(name) mesh.assignVertsToGroup(name, S.values(), 1.0, 1) mesh.calcNormals() v_group_names", "+ idx s = name[:30-len(s)] + s i+= 1 return s #--------------------------------------- #", "bones/transforms; otherwise, used bones only\") btn_save_log = Draw.Toggle(\"Save log\", 0x34, 320, pos_y, 100,", "or None T2 = group.tex_coords2 and group.tex_coords2[:] # also, Blender does not like", "loop str_gmdc_filename = Draw.Create(\"\") str_cres_filename = Draw.Create(\"\") btn_import_bmesh = Draw.Create(0) btn_remove_doubles = Draw.Create(1)", "# Ok set_log_file(f) # # begin import # log( '==Geometry Data Container Importer======'", "WITH THE SOFTWARE OR 
THE USE OR OTHER DEALINGS IN # THE SOFTWARE.", "group in geometry.index_groups: log( 'Index group \"%s\"' % group.name ) data_group = geometry.data_groups[group.data_group_index]", "(vertices: %i, triangles: %i)...' % (len(V), len(I)) ) # create mesh and add", "evt == 0x11: Blender.Window.FileSelector(set_gmdc_filename, 'Select') elif evt == 0x21: Blender.Window.FileSelector(set_cres_filename, 'Select') #------------------------------------------------------------------------------- #", "= node.abs_transform.loc + node.abs_transform.rot.get_matrix().col(2)*0.05 _bone.tail = BlenderVector(v.to_tuple()) if parent_bone: _bone.parent = parent_bone name", "None T2 = group.tex_coords2 and group.tex_coords2[:] # also, Blender does not like triangles", "and error( 'Not a CRES file!' ) except: print_last_exception() if not transform_tree: close_log_file()", "or [] for group in geometry.index_groups))) if settings['all_bones']: node_ids = set(map(id, transform_tree)) else:", "# assign vertices for i, (b, w) in enumerate(zip(B, W)): for wi, j", "0x32, 120, pos_y, 100, 20, btn_remove_doubles.val, \"If some vertices differ only in texture", "T1 = [(T1[i], T1[j], T1[k]) for i, j, k in I] if data_group.tex_coords2:", "% i, t, 'removed' ) for i in reversed(w): del I[i] if T1:", "Blender.Window.FileSelector(set_gmdc_filename, 'Select') elif evt == 0x21: Blender.Window.FileSelector(set_cres_filename, 'Select') #------------------------------------------------------------------------------- # set default values", "characters (?) i = 1 while s in collection: s = '.%i'%i +", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "1) except: print_last_exception() res = False if not res or res.nodes[0].type != 'cGeometryDataContainer':", "'==SKELETON==============================' ) log( transform_tree ) log() try: if settings['remove_doubles']: log( 'Removing doubles...' 
)", "filtering function def select_data(data): return [x for i, x in enumerate(data) if i", "v = sum(v, Vector())*(1./len(v)) if (v and node.bone_index in bone_set) else node.abs_transform.loc #", "indices I = [(S[i], S[j], S[k]) for i, j, k in group.indices] #", "default values for gui elements and run event loop str_gmdc_filename = Draw.Create(\"\") str_cres_filename", "data_group.bones: B = select_data(data_group.bones) W = select_data(data_group.weights) log( '--Assigning vertices to vertex groups...'", "if T1: mesh.addUVLayer('UVMap') # assign texture coords # for f, t in zip(mesh.faces,", "for j in b) for b in B] dd = dict() # {", "resource node file. See log for details.']) return log() if _save_log: log( '==SKELETON=============================='", "OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. #------------------------------------------------------------------------------- from gmdc_tools", "'GMDC (.gmdc, .5gd)' Blender: 249 Group: 'Import' Tooltip: 'Import TS2 GMDC file' \"\"\"", "furnished to do so, subject to the following conditions: # # The above", "i, x in enumerate(sorted(set(chain(*group.indices)))): S[x] = i # map indices I = [(S[i],", "# { old_index -> new_index } for i, x in enumerate(sorted(set(chain(*group.indices)))): S[x] =", "file' \"\"\" #------------------------------------------------------------------------------- # Copyright (C) 2016 DjAlex88 (https://github.com/djalex88/) # # Permission is", "pos_y, 300, 20, str_cres_filename.val, MAX_PATH, \"Path to resource node file (CRES; optional, but", "settings = { 'import_bmesh': btn_import_bmesh.val, 'remove_doubles': btn_remove_doubles.val, 'all_bones': btn_all_bones.val, } _save_log = bool(btn_save_log.val)", "as BlenderVector ######################################## ## Importer ######################################## def create_objects(geometry, transform_tree, settings): #--------------------------------------- # subroutines", "T2[i] w = None log( '--Creating mesh object (vertices: %i, 
triangles: %i)...' %", "node and id(node) not in node_ids: node_ids.add(id(node)) node = node.parent if node_ids: log(", "at the bottom def make_unique_bone_name(name, idx, collection): idx = '#%i'%idx if idx!=None else", "= 'b_mesh' if geometry.dynamic_bmesh: log( 'Creating dynamic bounding mesh...' ) mesh = Blender.Mesh.New('b_mesh')", "for idx in group.bones: name = transform_tree and transform_tree.get_node(idx).name or 'bone' dd[idx] =", "scene.update() #<- end def begin_import(): settings = { 'import_bmesh': btn_import_bmesh.val, 'remove_doubles': btn_remove_doubles.val, 'all_bones':", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "s in enumerate(geometry.morph_names): _keys_f = filter(lambda t: idx in t[1], enumerate(keys)) if _keys_f:", "Exception() except: log( 'Saving inverse transforms in scene.properties[\"gmdc_inverse_transforms\"]' ) scene.properties['gmdc_inverse_transforms'] = v #", "\"%s\"' % s ) obj.insertShapeKey() mesh.key.blocks[-1].name = s # set name block_verts =", ") log( 'GMDC file:', gmdc_filename ) log( 'CRES file:', cres_filename ) log( 'Settings:'", "header s = \"GMDC Importer (TS2)\" Blender.BGL.glColor3f(0.8, 0.8, 0.8) Blender.BGL.glRecti(10, pos_y, 430, pos_y+30)", "be included in # all copies or substantial portions of the Software. #", "bone_set) else node.abs_transform.loc # the bone's length must not be 0, otherwise Blender", "uv3 = T2[i] T2[i] = (uv3, uv1, uv2) if len(set(t)) < 3: w.append(i)", "!= w and display_menu('The file has a different set of inverse transforms. Replace?',", "transform_tree)) else: node_ids = set() for j in bone_set: node = transform_tree.get_node(j) assert", "w = tuple(scene.properties['gmdc_inverse_transforms']) log( 'Scene already has inverse transforms (%i) stored in scene.properties[\"gmdc_inverse_transforms\"]'", "35 # options Draw.BeginAlign() btn_import_bmesh = Draw.Toggle(\"Bound. 
mesh\", 0x31, 20, pos_y, 100, 20,", "= make_unique_bone_name(node.name, node.bone_index, armature.bones.keys()) # add bone and its children armature.bones[name] = _bone", "len(node_ids) ) armature = Blender.Armature.New() armature.envelopes = False armature.vertexGroups = True armature.drawType =", "GMDC file.']) return # create log file (if needed) if _save_log: s =", "in scene.properties[\"gmdc_inverse_transforms\"]' % (len(w)/7) ) if v != w and display_menu('The file has", "in B] dd = dict() # { index -> unique_bone_name } for idx", "geometry\") btn_remove_doubles = Draw.Toggle(\"Rm. doubles\", 0x32, 120, pos_y, 100, 20, btn_remove_doubles.val, \"If some", "select_data(data_group.tex_coords2) T2 = [(T2[i], T2[j], T2[k]) for i, j, k in I] else:", "for group in geometry.index_groups: log( 'Index group \"%s\"' % group.name ) data_group =", "45 # buttons Draw.BeginAlign() Draw.PushButton(\"Import\", 1, 120, pos_y, 100, 30, \"Import geometry (Ctrl", "armature modifier # for obj in mesh_objects: modifier = obj.modifiers.append(Blender.Modifier.Types.ARMATURE) modifier[Blender.Modifier.Settings.VGROUPS ] =", "make_unique_bone_name(name, idx, collection): idx = '#%i'%idx if idx!=None else '' s = name[:30-len(idx)]", "B = [tuple(group.bones[j] for j in b) for b in B] dd =", "# max - 21 characters # save original name and flags assert type(group.name)", "transform_tree.get_node(idx).name or 'bone' name = make_unique_bone_name(name, idx, v_group_names) v_group_names.add(name) mesh.addVertGroup(name) mesh.assignVertsToGroup(name, S.values(), 1.0,", "cres_filename = str_cres_filename.val.strip() if not gmdc_filename: display_menu('Error!', ['Select GMDC file.']) return # create", "mesh.faces.extend(I) name = transform_tree and transform_tree.get_node(idx).name or 'bone' name = make_unique_bone_name(name, idx, v_group_names)", "None mesh_objects.append(obj) # # load inverse transforms (if any) # if geometry.inverse_transforms: v", "details.']) else: # Ok log( 'Finished!' 
) Blender.Redraw() # exit prompt if display_menu(\"Import", "s ) obj.insertShapeKey() mesh.key.blocks[-1].name = s # set name block_verts = mesh.key.blocks[-1].data #", "s = \"GMDC Importer (TS2)\" Blender.BGL.glColor3f(0.8, 0.8, 0.8) Blender.BGL.glRecti(10, pos_y, 430, pos_y+30) Draw.Label(s,", "vertex groups modifier[Blender.Modifier.Settings.ENVELOPES] = False # not envelopes modifier[Blender.Modifier.Settings.OBJECT ] = arm_obj scene.update()", ") log( '--Import all bones: ', settings['all_bones'] ) log() # load geometry log(", "bones: ', settings['all_bones'] ) log() # load geometry log( 'Opening GMDC file \"%s\"...'", "20) pos_y-= 20 Draw.BeginAlign() str_gmdc_filename = Draw.String(\"\", 0x10, 20, pos_y, 300, 20, str_gmdc_filename.val,", "'remove_doubles': btn_remove_doubles.val, 'all_bones': btn_all_bones.val, } _save_log = bool(btn_save_log.val) gmdc_filename = str_gmdc_filename.val.strip() cres_filename =", "data_group.bones and 'yes' or 'no' ) # rigging # if data_group.bones: B =", "l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0 def button_events(evt): if evt == 0: Draw.Exit()", "enumerate(b): if wi == 3: f = 1.0 - sum(w) else: f =", "use vertex groups modifier[Blender.Modifier.Settings.ENVELOPES] = False # not envelopes modifier[Blender.Modifier.Settings.OBJECT ] = arm_obj", "file. See log for details.']) return geometry = res.nodes[0].geometry log() transform_tree = None", ") log( 'CRES file:', cres_filename ) log( 'Settings:' ) log( '--Import bounding geometry:',", "file \"%s\" for writing... ' % s ) try: f = open(s, 'w')", "|= Blender.Object.DrawModes.XRAY # add bones armature.makeEditable() add_bones_to_armature(transform_tree.root_nodes) armature.update() log( '--Adding armature modifier(s)...' 
)", "(Ctrl + Enter)\") Draw.PushButton(\"Exit\", 0, 220, pos_y, 100, 30, \"Terminate the script (Esc)\")", "rigging # if data_group.bones: B = select_data(data_group.bones) W = select_data(data_group.weights) log( '--Assigning vertices", "= bpy.data.scenes.active # # add mesh objects (main geometry) # mesh_objects = []", "armature (if any) # if transform_tree: bone_set = set(chain(*(group.bones or [] for group", "%i)...' % (len(V), len(I)) ) # create mesh and add it to the", "res and error( 'Not a CRES file!' ) except: print_last_exception() if not transform_tree:", "mesh\", 0x31, 20, pos_y, 100, 20, btn_import_bmesh.val, \"Import bounding geometry\") btn_remove_doubles = Draw.Toggle(\"Rm.", "= tuple(chain(*chain(*geometry.inverse_transforms))) try: w = tuple(scene.properties['gmdc_inverse_transforms']) log( 'Scene already has inverse transforms (%i)", "s = name[:30-len(s)] + s i+= 1 return s #--------------------------------------- # get active", "## ## armature, node_ids and bone_set are defined at the bottom def make_unique_bone_name(name,", "%i' % i, t, 'removed' ) for i in reversed(w): del I[i] if", "dd = dict() # { index -> unique_bone_name } for idx in group.bones:", "= (uv3, uv1, uv2) if T2: uv1, uv2, uv3 = T2[i] T2[i] =", "str_gmdc_filename.val = filename def set_cres_filename(filename): global cres_filename str_cres_filename.val = filename def event_handler(evt, val):", "## armature, node_ids and bone_set are defined at the bottom def make_unique_bone_name(name, idx,", "down to root while node and id(node) not in node_ids: node_ids.add(id(node)) node =", "chain import bpy, Blender from Blender import Draw from Blender.Mathutils import Vector as", "= group.name # max - 21 characters # save original name and flags", "0.025: v = node.abs_transform.loc + node.abs_transform.rot.get_matrix().col(2)*0.05 _bone.tail = BlenderVector(v.to_tuple()) if parent_bone: _bone.parent =", "enumerate(sorted(set(chain(*group.indices)))): S[x] = i # map indices I = [(S[i], 
S[j], S[k]) for", "load skeleton log( 'Opening CRES file \"%s\"...' % cres_filename ) try: res =", "Blender.BGL.glRecti(10, pos_y, 430, pos_y+30) Draw.Label(s, 20, pos_y, 400, 30) pos_y-= 30 # GMDC", "-> unique_bone_name } for idx in group.bones: name = transform_tree and transform_tree.get_node(idx).name or", "+ idx # max - 31 characters (?) i = 1 while s", "seams)\") btn_all_bones = Draw.Toggle(\"All bones\", 0x33, 220, pos_y, 100, 20, btn_all_bones.val, \"Import all", "1.0, 1) mesh.calcNormals() v_group_names = None mesh_objects.append(obj) # # load inverse transforms (if", "{ index -> unique_bone_name } for idx in group.bones: name = transform_tree and", "create_objects(geometry, transform_tree, settings) except: print_last_exception() display_menu('Error!', ['An error has occured. See log for", "for writing... ' % s ) try: f = open(s, 'w') except IOError", "= val elif evt == Draw.RETKEY and val and (l_ctrl_key_pressed or r_ctrl_key_pressed): begin_import()", "j in xrange(max(dd)+1)] # assign vertices for i, (b, w) in enumerate(zip(B, W)):", "val): global l_ctrl_key_pressed, r_ctrl_key_pressed if evt == Draw.ESCKEY and val: Draw.Exit() elif evt", "copyright notice and this permission notice shall be included in # all copies", "btn_save_log.val, \"Write script's log data into file *.import_log.txt\") Draw.EndAlign() pos_y-= 45 # buttons", "deal # in the Software without restriction, including without limitation the rights #", "GUI ######################################## def display_menu(caption, items, choice_required=False): b = True while b: choice =", "elif evt == Draw.RETKEY and val and (l_ctrl_key_pressed or r_ctrl_key_pressed): begin_import() l_ctrl_key_pressed =", "'::'.join(s) log( '\\x20\\x20--Key \"%s\"' % s ) obj.insertShapeKey() mesh.key.blocks[-1].name = s # set", "in node.child_nodes if (id(_n) in node_ids and _n.bone_index in bone_set)] v = sum(v,", "# assign armature modifier # for obj in mesh_objects: modifier = 
obj.modifiers.append(Blender.Modifier.Types.ARMATURE) modifier[Blender.Modifier.Settings.VGROUPS", "[] for group in geometry.index_groups))) if settings['all_bones']: node_ids = set(map(id, transform_tree)) else: node_ids", "i, j, k in I] if data_group.tex_coords2: T2 = select_data(data_group.tex_coords2) T2 = [(T2[i],", "0x21: Blender.Window.FileSelector(set_cres_filename, 'Select') #------------------------------------------------------------------------------- # set default values for gui elements and run", ".5gd)' Blender: 249 Group: 'Import' Tooltip: 'Import TS2 GMDC file' \"\"\" #------------------------------------------------------------------------------- #", "file *.import_log.txt\") Draw.EndAlign() pos_y-= 45 # buttons Draw.BeginAlign() Draw.PushButton(\"Import\", 1, 120, pos_y, 100,", "file \"%s\"...' % cres_filename ) try: res = load_resource(cres_filename, _save_log and 2 or", "j in b) for b in B] dd = dict() # { index", "log( '--Creating mesh object (vertices: %i, triangles: %i)...' 
% (len(V), len(I)) ) #", "# rigging # if data_group.bones: B = select_data(data_group.bones) W = select_data(data_group.weights) log( '--Assigning", "# load inverse transforms (if any) # if geometry.inverse_transforms: v = tuple(chain(*chain(*geometry.inverse_transforms))) try:", "bone_set)] v = sum(v, Vector())*(1./len(v)) if (v and node.bone_index in bone_set) else node.abs_transform.loc", ") try: res = load_resource(cres_filename, _save_log and 2 or 1) if res and", "only\") btn_save_log = Draw.Toggle(\"Save log\", 0x34, 320, pos_y, 100, 20, btn_save_log.val, \"Write script's", "I = [(S[i], S[j], S[k]) for i, j, k in group.indices] # filtering", "well as degenerate triangles (i.e., less than 3 different indices): # https://www.blender.org/api/249PythonDoc/Mesh.MFaceSeq-class.html#extend #", "\"Path to GMDC file\") Draw.PushButton(\"Select file\", 0x11, 320, pos_y, 100, 20, \"Open file", "arm_obj.drawMode |= Blender.Object.DrawModes.XRAY # add bones armature.makeEditable() add_bones_to_armature(transform_tree.root_nodes) armature.update() log( '--Adding armature modifier(s)...'", "scene scene = bpy.data.scenes.active # # add mesh objects (main geometry) # mesh_objects", "file (CRES; optional, but recommended)\") Draw.PushButton(\"Select file\", 0x21, 320, pos_y, 100, 20, \"Open", "wi, j in enumerate(b): if wi == 3: f = 1.0 - sum(w)", "part S = {} # { old_index -> new_index } j = len(mesh.verts)", "add bone and its children armature.bones[name] = _bone add_bones_to_armature(node.child_nodes, _bone) ## ## armature,", "'Removing doubles...' ) geometry.remove_doubles() log() log( 'Creating objects...' 
) create_objects(geometry, transform_tree, settings) except:", "if T2: mesh.addUVLayer('UVMap2') mesh.activeUVLayer = 'UVMap2' for f, t in zip(mesh.faces, T2): f.uv", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "position # as well as degenerate triangles (i.e., less than 3 different indices):", "to whom the Software is # furnished to do so, subject to the", "= bool(btn_save_log.val) gmdc_filename = str_gmdc_filename.val.strip() cres_filename = str_cres_filename.val.strip() if not gmdc_filename: display_menu('Error!', ['Select", "assign texture coords # for f, t in zip(mesh.faces, T1): f.uv = tuple(BlenderVector(u,", "for node in transform_nodes: if id(node) in node_ids: _bone = Blender.Armature.Editbone() _bone.head =", "i in S] V = select_data(data_group.vertices) # texture coords if data_group.tex_coords: T1 =", "for f, t in zip(mesh.faces, T2): f.uv = tuple(BlenderVector(u, 1-v) for u, v", "20, \"Open file browser\") Draw.EndAlign() pos_y-= 35 # options Draw.BeginAlign() btn_import_bmesh = Draw.Toggle(\"Bound.", "s i+= 1 return s #--------------------------------------- # get active scene scene = bpy.data.scenes.active", "indices): # https://www.blender.org/api/249PythonDoc/Mesh.MFaceSeq-class.html#extend # w = [] for i, t in enumerate(I): if", "uv1, uv2, uv3 = T1[i] T1[i] = (uv3, uv1, uv2) if T2: uv1,", "Draw.BeginAlign() btn_import_bmesh = Draw.Toggle(\"Bound. mesh\", 0x31, 20, pos_y, 100, 20, btn_import_bmesh.val, \"Import bounding", "max - 21 characters # save original name and flags assert type(group.name) ==", "GMDC file!' ) close_log_file() display_menu('Error!', ['Could not load geometry file. 
See log for", "0x10, 20, pos_y, 300, 20, str_gmdc_filename.val, MAX_PATH, \"Path to GMDC file\") Draw.PushButton(\"Select file\",", "node.bone_index in bone_set) else node.abs_transform.loc # the bone's length must not be 0,", "zero-index vertex on 3rd position # as well as degenerate triangles (i.e., less", "and val: Draw.Exit() elif evt == Draw. LEFTCTRLKEY: l_ctrl_key_pressed = val elif evt", "if geometry.inverse_transforms: v = tuple(chain(*chain(*geometry.inverse_transforms))) try: w = tuple(scene.properties['gmdc_inverse_transforms']) log( 'Scene already has", "and 2 or 1) if res and res.nodes[0].type == 'cResourceNode': transform_tree = build_transform_tree(res.nodes)", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "log( 'Creating dynamic bounding mesh...' ) mesh = Blender.Mesh.New('b_mesh') obj = scene.objects.new(mesh) obj.name", "Draw.EndAlign() pos_y-= 35 # options Draw.BeginAlign() btn_import_bmesh = Draw.Toggle(\"Bound. mesh\", 0x31, 20, pos_y,", "reordered:' % i, t, '->', I[i] ) if T1: uv1, uv2, uv3 =", "_keys_f: j = key.index(idx) v = dV[j] if v: block_verts[i]+= BlenderVector(*v[i]) obj.activeShape =", "needed) if _save_log: s = gmdc_filename + '.import_log.txt' log( 'Opening log file \"%s\"", "'UVMap2' for f, t in zip(mesh.faces, T2): f.uv = tuple(BlenderVector(u, 1-v) for u,", "and transform_tree.get_node(idx).name or 'bone' name = make_unique_bone_name(name, idx, v_group_names) v_group_names.add(name) mesh.addVertGroup(name) mesh.assignVertsToGroup(name, S.values(),", "W = select_data(data_group.weights) log( '--Assigning vertices to vertex groups...' ) # map bones", "obj in mesh_objects: modifier = obj.modifiers.append(Blender.Modifier.Types.ARMATURE) modifier[Blender.Modifier.Settings.VGROUPS ] = True # use vertex", "THE SOFTWARE. 
#------------------------------------------------------------------------------- from gmdc_tools import * from itertools import chain import bpy,", "x in enumerate(V) if i in S] I = [(S[i], S[j], S[k]) for", "btn_import_bmesh = Draw.Toggle(\"Bound. mesh\", 0x31, 20, pos_y, 100, 20, btn_import_bmesh.val, \"Import bounding geometry\")", "100, 20, btn_save_log.val, \"Write script's log data into file *.import_log.txt\") Draw.EndAlign() pos_y-= 45", "set_gmdc_filename(filename): global gmdc_filename str_gmdc_filename.val = filename def set_cres_filename(filename): global cres_filename str_cres_filename.val = filename", "= (uv3, uv1, uv2) if len(set(t)) < 3: w.append(i) log( '--Triangle # %i'", "btn_remove_doubles.val, \"If some vertices differ only in texture coordinates, then they are merged", "begin import # log( '==Geometry Data Container Importer======' ) log( 'GMDC file:', gmdc_filename", "pos_y, 100, 20, btn_import_bmesh.val, \"Import bounding geometry\") btn_remove_doubles = Draw.Toggle(\"Rm. doubles\", 0x32, 120,", "mesh_objects.append(obj) # save reference to current object log( '--Rigging:', data_group.bones and 'yes' or", "arm_obj = scene.objects.new(armature) # create armature object arm_obj.drawMode |= Blender.Object.DrawModes.XRAY # add bones", "gmdc_filename = str_gmdc_filename.val.strip() cres_filename = str_cres_filename.val.strip() if not gmdc_filename: display_menu('Error!', ['Select GMDC file.'])", "load geometry file. 
See log for details.']) return geometry = res.nodes[0].geometry log() transform_tree", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "in geometry.index_groups))) if settings['all_bones']: node_ids = set(map(id, transform_tree)) else: node_ids = set() for", "log( '--Triangle # %i' % i, t, 'removed' ) for i in reversed(w):", "sell # copies of the Software, and to permit persons to whom the", "and bone_set are defined at the bottom def make_unique_bone_name(name, idx, collection): idx =", "btn_import_bmesh, btn_all_bones, btn_remove_doubles, btn_save_log pos_y = 230 ; MAX_PATH = 200 # frame", "make_unique_bone_name(name, idx, dd.values()) # add vertex group mesh.addVertGroup(name) v_group_names = [dd.get(j) for j", "for wi, j in enumerate(b): if wi == 3: f = 1.0 -", "# # add armature (if any) # if transform_tree: bone_set = set(chain(*(group.bones or", "# all copies or substantial portions of the Software. # # THE SOFTWARE", "== 0: Draw.Exit() finally: close_log_file() ######################################## ## GUI ######################################## def display_menu(caption, items, choice_required=False):", "armature.bones[name] = _bone add_bones_to_armature(node.child_nodes, _bone) ## ## armature, node_ids and bone_set are defined", "== 0x11: Blender.Window.FileSelector(set_gmdc_filename, 'Select') elif evt == 0x21: Blender.Window.FileSelector(set_cres_filename, 'Select') #------------------------------------------------------------------------------- # set", "\"Import geometry (Ctrl + Enter)\") Draw.PushButton(\"Exit\", 0, 220, pos_y, 100, 30, \"Terminate the", "for i, x in enumerate(V) if i in S] I = [(S[i], S[j],", "== 0x21: Blender.Window.FileSelector(set_cres_filename, 'Select') #------------------------------------------------------------------------------- # set default values for gui elements and", "if T2: del T2[i] w = None log( '--Creating mesh object (vertices: %i,", "all copies or substantial portions of the Software. 
# # THE SOFTWARE IS", "else: f = w[wi] mesh.assignVertsToGroup(v_group_names[j], [i], f, 1) # 1 - Blender.Mesh.AssignModes.REPLACE v_group_names", "= make_unique_bone_name(name, idx, v_group_names) v_group_names.add(name) mesh.addVertGroup(name) mesh.assignVertsToGroup(name, S.values(), 1.0, 1) mesh.calcNormals() v_group_names =", "\"Import all bones/transforms; otherwise, used bones only\") btn_save_log = Draw.Toggle(\"Save log\", 0x34, 320,", ") try: f = open(s, 'w') except IOError as e: error(e) display_menu('Error!', ['Could", "if part: V, I = part S = {} # { old_index ->", "= build_transform_tree(res.nodes) else: res and error( 'Not a CRES file!' ) except: print_last_exception()", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "make_unique_bone_name(node.name, node.bone_index, armature.bones.keys()) # add bone and its children armature.bones[name] = _bone add_bones_to_armature(node.child_nodes,", "T2[i] T2[i] = (uv3, uv1, uv2) if len(set(t)) < 3: w.append(i) log( '--Triangle", "armature object arm_obj.drawMode |= Blender.Object.DrawModes.XRAY # add bones armature.makeEditable() add_bones_to_armature(transform_tree.root_nodes) armature.update() log( '--Adding", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT", "Importer (TS2)\" Blender.BGL.glColor3f(0.8, 0.8, 0.8) Blender.BGL.glRecti(10, pos_y, 430, pos_y+30) Draw.Label(s, 20, pos_y, 400,", "btn_all_bones.val, \"Import all bones/transforms; otherwise, used bones only\") btn_save_log = Draw.Toggle(\"Save log\", 0x34,", "files (the \"Software\"), to deal # in the Software without restriction, including without", "2016 DjAlex88 (https://github.com/djalex88/) # # Permission is hereby granted, free of charge, to", "# add bone and its children armature.bones[name] = _bone add_bones_to_armature(node.child_nodes, _bone) ## ##", "in I] else: T2 = None else: T1 = group.tex_coords and group.tex_coords[:] #", "used bones only\") btn_save_log = Draw.Toggle(\"Save log\", 0x34, 320, pos_y, 100, 20, btn_save_log.val,", "= 'b_mesh' v_group_names = set() for idx, part in enumerate(geometry.dynamic_bmesh): if part: V,", "obj = scene.objects.new(mesh) obj.name = 'b_mesh' v_group_names = set() for idx, part in", "= Draw.PupMenu('%s%%t|'%caption + \"|\".join('%s%%x%i'%(s, i) for i, s in enumerate(items)), 0x100) b =", "_save_log: log( '==SKELETON==============================' ) log( transform_tree ) log() try: if settings['remove_doubles']: log( 'Removing", "== Draw. LEFTCTRLKEY: l_ctrl_key_pressed = val elif evt == Draw.RIGHTCTRLKEY: r_ctrl_key_pressed = val", "= scene.objects.new(mesh) obj.name = 'b_mesh' v_group_names = set() for idx, part in enumerate(geometry.dynamic_bmesh):", "following conditions: # # The above copyright notice and this permission notice shall", "Draw.String(\"\", 0x20, 20, pos_y, 300, 20, str_cres_filename.val, MAX_PATH, \"Path to resource node file", "keys # if data_group.keys: log( '--Adding shape keys...' 
) keys = select_data(data_group.keys) dV", "% group.name ) data_group = geometry.data_groups[group.data_group_index] # define index mapping S = {}", "%i, %i, %i)' % tuple(map(len, dV)) ) # basis obj.insertShapeKey() for idx, s", "mesh_objects.append(obj) # # load inverse transforms (if any) # if geometry.inverse_transforms: v =", "tail pos as arithmetic mean v = [_n.abs_transform.loc for _n in node.child_nodes if", "# copy or None T2 = group.tex_coords2 and group.tex_coords2[:] # also, Blender does", "# return to basis #<- groups # # add bounding geometry # if", "'all_bones': btn_all_bones.val, } _save_log = bool(btn_save_log.val) gmdc_filename = str_gmdc_filename.val.strip() cres_filename = str_cres_filename.val.strip() if", "The above copyright notice and this permission notice shall be included in #", "bone_set are defined at the bottom def make_unique_bone_name(name, idx, collection): idx = '#%i'%idx", "v = node.abs_transform.loc + node.abs_transform.rot.get_matrix().col(2)*0.05 _bone.tail = BlenderVector(v.to_tuple()) if parent_bone: _bone.parent = parent_bone", "'--Adding armature modifier(s)...' ) # assign armature modifier # for obj in mesh_objects:", "btn_remove_doubles.val, 'all_bones': btn_all_bones.val, } _save_log = bool(btn_save_log.val) gmdc_filename = str_gmdc_filename.val.strip() cres_filename = str_cres_filename.val.strip()", "# texture coords if data_group.tex_coords: T1 = select_data(data_group.tex_coords) T1 = [(T1[i], T1[j], T1[k])", "tuple(map(len, dV)) ) # basis obj.insertShapeKey() for idx, s in enumerate(geometry.morph_names): _keys_f =", "settings) except: print_last_exception() display_menu('Error!', ['An error has occured. 
See log for details.']) else:", "cres_filename str_cres_filename.val = filename def event_handler(evt, val): global l_ctrl_key_pressed, r_ctrl_key_pressed if evt ==", "def create_objects(geometry, transform_tree, settings): #--------------------------------------- # subroutines def create_mesh(name, V, I, T1, T2):", "['Yes, replace inverse transforms.', 'No, keep previously loaded inverse transforms.'], choice_required=True) == 0:", "static bounding mesh...' ) V, I = geometry.static_bmesh mesh = Blender.Mesh.New('b_mesh') mesh.verts.extend(V) mesh.faces.extend(I)", "_keys_f: s = '::'.join(s) log( '\\x20\\x20--Key \"%s\"' % s ) obj.insertShapeKey() mesh.key.blocks[-1].name =", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "'w') except IOError as e: error(e) display_menu('Error!', ['Could not open log file for", "file:', cres_filename ) log( 'Settings:' ) log( '--Import bounding geometry:', settings['import_bmesh'] ) log(", "_keys_f = filter(lambda t: idx in t[1], enumerate(keys)) if _keys_f: s = '::'.join(s)", "together (removes seams)\") btn_all_bones = Draw.Toggle(\"All bones\", 0x33, 220, pos_y, 100, 20, btn_all_bones.val,", "log( 'GMDC file:', gmdc_filename ) log( 'CRES file:', cres_filename ) log( 'Settings:' )", "normals is useless # instead, calculate normals mesh.calcNormals() if T1: mesh.addUVLayer('UVMap') # assign", "btn_save_log = Draw.Toggle(\"Save log\", 0x34, 320, pos_y, 100, 20, btn_save_log.val, \"Write script's log", "(main geometry) # mesh_objects = [] for group in geometry.index_groups: log( 'Index group", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "[tuple(group.bones[j] for j in b) for b in B] dd = dict() #", "x in enumerate(sorted(set(chain(*group.indices)))): S[x] = i # map indices I = [(S[i], S[j],", "new_index } j = len(mesh.verts) for i, x in enumerate(sorted(set(chain(*I)))): S[x] = i+j", "obj = scene.objects.new(mesh) obj.name = 'b_mesh' if geometry.dynamic_bmesh: log( 
'Creating dynamic bounding mesh...'", "file browser\") Draw.EndAlign() pos_y-= 35 # options Draw.BeginAlign() btn_import_bmesh = Draw.Toggle(\"Bound. mesh\", 0x31,", "file.']) return # create log file (if needed) if _save_log: s = gmdc_filename", "mesh.addVertGroup(name) v_group_names = [dd.get(j) for j in xrange(max(dd)+1)] # assign vertices for i,", "of inverse transforms. Replace?', ['Yes, replace inverse transforms.', 'No, keep previously loaded inverse", "= [] for i, t in enumerate(I): if 0 == t[2]: I[i] =", "*.import_log.txt\") Draw.EndAlign() pos_y-= 45 # buttons Draw.BeginAlign() Draw.PushButton(\"Import\", 1, 120, pos_y, 100, 30,", "evt == Draw.RIGHTCTRLKEY: r_ctrl_key_pressed = val elif evt == Draw.RETKEY and val and", ") Blender.Redraw() # exit prompt if display_menu(\"Import complete!\", ['Quit']) == 0: Draw.Exit() finally:", "S[x] = i+j rot, loc = geometry.inverse_transforms[idx] t = Transform(loc, rot).get_inverse() V =", "\"GMDC Importer (TS2)\" Blender.BGL.glColor3f(0.8, 0.8, 0.8) Blender.BGL.glRecti(10, pos_y, 430, pos_y+30) Draw.Label(s, 20, pos_y,", "# save original name and flags assert type(group.name) == str obj.addProperty('name', group.name) #", "notice shall be included in # all copies or substantial portions of the", "'Saving inverse transforms in scene.properties[\"gmdc_inverse_transforms\"]' ) scene.properties['gmdc_inverse_transforms'] = v # # add armature", "= None log( '--Creating mesh object (vertices: %i, triangles: %i)...' 
% (len(V), len(I))", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "assign armature modifier # for obj in mesh_objects: modifier = obj.modifiers.append(Blender.Modifier.Types.ARMATURE) modifier[Blender.Modifier.Settings.VGROUPS ]", "'Select') #------------------------------------------------------------------------------- # set default values for gui elements and run event loop", "return mesh def add_bones_to_armature(transform_nodes, parent_bone=None): for node in transform_nodes: if id(node) in node_ids:", "e: error(e) display_menu('Error!', ['Could not open log file for writing.']) return # Ok", "GMDC file' \"\"\" #------------------------------------------------------------------------------- # Copyright (C) 2016 DjAlex88 (https://github.com/djalex88/) # # Permission", "1, 120, pos_y, 100, 30, \"Import geometry (Ctrl + Enter)\") Draw.PushButton(\"Exit\", 0, 220,", "transform_tree, settings) except: print_last_exception() display_menu('Error!', ['An error has occured. See log for details.'])", "arm_obj scene.update() #<- end def begin_import(): settings = { 'import_bmesh': btn_import_bmesh.val, 'remove_doubles': btn_remove_doubles.val,", "# options Draw.BeginAlign() btn_import_bmesh = Draw.Toggle(\"Bound. mesh\", 0x31, 20, pos_y, 100, 20, btn_import_bmesh.val,", "'Settings:' ) log( '--Import bounding geometry:', settings['import_bmesh'] ) log( '--Remove doubles: ', settings['remove_doubles']", "None if cres_filename: # load skeleton log( 'Opening CRES file \"%s\"...' % cres_filename", "= '#%i'%idx if idx!=None else '' s = name[:30-len(idx)] + idx # max", "enumerate(zip(B, W)): for wi, j in enumerate(b): if wi == 3: f =", "######################################## ## Importer ######################################## def create_objects(geometry, transform_tree, settings): #--------------------------------------- # subroutines def create_mesh(name,", "in collection: s = '.%i'%i + idx s = name[:30-len(s)] + s i+=", "node file. 
See log for details.']) return log() if _save_log: log( '==SKELETON==============================' )", "rot).get_inverse() V = [t.transformPoint(Vector(*x)).to_tuple() for i, x in enumerate(V) if i in S]", "idx in t[1], enumerate(keys)) if _keys_f: s = '::'.join(s) log( '\\x20\\x20--Key \"%s\"' %", "l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0 def set_gmdc_filename(filename): global gmdc_filename str_gmdc_filename.val = filename", "f = open(s, 'w') except IOError as e: error(e) display_menu('Error!', ['Could not open", "inverse transforms in scene.properties[\"gmdc_inverse_transforms\"]' ) scene.properties['gmdc_inverse_transforms'] = v # # add armature (if", "30 # plugin's header s = \"GMDC Importer (TS2)\" Blender.BGL.glColor3f(0.8, 0.8, 0.8) Blender.BGL.glRecti(10,", "'#%i'%idx if idx!=None else '' s = name[:30-len(idx)] + idx # max -", "[i], f, 1) # 1 - Blender.Mesh.AssignModes.REPLACE v_group_names = dd = None #", "it if (node.abs_transform.loc-v).len() < 0.025: v = node.abs_transform.loc + node.abs_transform.rot.get_matrix().col(2)*0.05 _bone.tail = BlenderVector(v.to_tuple())", "in xrange(max(dd)+1)] # assign vertices for i, (b, w) in enumerate(zip(B, W)): for", "node.abs_transform.rot.get_matrix().col(2)*0.05 _bone.tail = BlenderVector(v.to_tuple()) if parent_bone: _bone.parent = parent_bone name = make_unique_bone_name(node.name, node.bone_index,", "its children armature.bones[name] = _bone add_bones_to_armature(node.child_nodes, _bone) ## ## armature, node_ids and bone_set", ") geometry.remove_doubles() log() log( 'Creating objects...' 
) create_objects(geometry, transform_tree, settings) except: print_last_exception() display_menu('Error!',", "begin_import() l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0 def button_events(evt): if evt == 0:", "mesh.assignVertsToGroup(name, S.values(), 1.0, 1) mesh.calcNormals() v_group_names = None mesh_objects.append(obj) # # load inverse", "open(s, 'w') except IOError as e: error(e) display_menu('Error!', ['Could not open log file", "enumerate(sorted(set(chain(*I)))): S[x] = i+j rot, loc = geometry.inverse_transforms[idx] t = Transform(loc, rot).get_inverse() V", "xrange(max(dd)+1)] # assign vertices for i, (b, w) in enumerate(zip(B, W)): for wi,", "in node_ids: node_ids.add(id(node)) node = node.parent if node_ids: log( 'Creating armature...' ) log(", "'Opening CRES file \"%s\"...' % cres_filename ) try: res = load_resource(cres_filename, _save_log and", "320, pos_y, 100, 20, btn_save_log.val, \"Write script's log data into file *.import_log.txt\") Draw.EndAlign()", "Vector())*(1./len(v)) if (v and node.bone_index in bone_set) else node.abs_transform.loc # the bone's length", "enumerate(V) if i in S] I = [(S[i], S[j], S[k]) for i, j,", "armature...' ) log( '--Number of transform nodes (%i)' % len(node_ids) ) armature =", "evt == 0: Draw.Exit() elif evt == 1: begin_import() elif evt == 0x11:", "run event loop str_gmdc_filename = Draw.Create(\"\") str_cres_filename = Draw.Create(\"\") btn_import_bmesh = Draw.Create(0) btn_remove_doubles", "0 r_ctrl_key_pressed = 0 def button_events(evt): if evt == 0: Draw.Exit() elif evt", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "# frame Blender.BGL.glColor3f(0.75, 0.75, 0.75) Blender.BGL.glRecti(10, 10, 430, pos_y) pos_y-= 30 # plugin's", "name[:30-len(idx)] + idx # max - 31 characters (?) 
i = 1 while", "j = len(mesh.verts) for i, x in enumerate(sorted(set(chain(*I)))): S[x] = i+j rot, loc", "log for details.']) return geometry = res.nodes[0].geometry log() transform_tree = None if cres_filename:", "gmdc_filename + '.import_log.txt' log( 'Opening log file \"%s\" for writing... ' % s", "log( '--Import all bones: ', settings['all_bones'] ) log() # load geometry log( 'Opening", "set() for j in bone_set: node = transform_tree.get_node(j) assert not isinstance(node, tuple) #", "f, t in zip(mesh.faces, T2): f.uv = tuple(BlenderVector(u, 1-v) for u, v in", "pos_y-= 45 # buttons Draw.BeginAlign() Draw.PushButton(\"Import\", 1, 120, pos_y, 100, 30, \"Import geometry", "% tuple(map(len, dV)) ) # basis obj.insertShapeKey() for idx, s in enumerate(geometry.morph_names): _keys_f", "= str_gmdc_filename.val.strip() cres_filename = str_cres_filename.val.strip() if not gmdc_filename: display_menu('Error!', ['Select GMDC file.']) return", "mesh.activeUVLayer = 'UVMap' return mesh def add_bones_to_armature(transform_nodes, parent_bone=None): for node in transform_nodes: if", "for u, v in t) mesh.activeUVLayer = 'UVMap' return mesh def add_bones_to_armature(transform_nodes, parent_bone=None):", "original normals is useless # instead, calculate normals mesh.calcNormals() if T1: mesh.addUVLayer('UVMap') #", "obj.name = 'b_mesh' v_group_names = set() for idx, part in enumerate(geometry.dynamic_bmesh): if part:", "is useless # instead, calculate normals mesh.calcNormals() if T1: mesh.addUVLayer('UVMap') # assign texture", "USE OR OTHER DEALINGS IN # THE SOFTWARE. 
#------------------------------------------------------------------------------- from gmdc_tools import *", "- sum(w) else: f = w[wi] mesh.assignVertsToGroup(v_group_names[j], [i], f, 1) # 1 -", "to the following conditions: # # The above copyright notice and this permission", "t in enumerate(I): if 0 == t[2]: I[i] = (t[2], t[0], t[1]) log(", "= key.index(idx) v = dV[j] if v: block_verts[i]+= BlenderVector(*v[i]) obj.activeShape = 1 #", "triangles with zero-index vertex on 3rd position # as well as degenerate triangles", "dV[j] if v: block_verts[i]+= BlenderVector(*v[i]) obj.activeShape = 1 # return to basis #<-", "= dict() # { index -> unique_bone_name } for idx in group.bones: name", "and res.nodes[0].type == 'cResourceNode': transform_tree = build_transform_tree(res.nodes) else: res and error( 'Not a", "idx s = name[:30-len(s)] + s i+= 1 return s #--------------------------------------- # get", "compute tail pos as arithmetic mean v = [_n.abs_transform.loc for _n in node.child_nodes", "# include all nodes down to root while node and id(node) not in", "j, k in I] else: T2 = None else: T1 = group.tex_coords and", "objects...' ) create_objects(geometry, transform_tree, settings) except: print_last_exception() display_menu('Error!', ['An error has occured. 
See", "transform_tree.get_node(idx).name or 'bone' dd[idx] = name = make_unique_bone_name(name, idx, dd.values()) # add vertex", "#------------------------------------------------------------------------------- from gmdc_tools import * from itertools import chain import bpy, Blender from", "mesh_objects: modifier = obj.modifiers.append(Blender.Modifier.Types.ARMATURE) modifier[Blender.Modifier.Settings.VGROUPS ] = True # use vertex groups modifier[Blender.Modifier.Settings.ENVELOPES]", "object arm_obj.drawMode |= Blender.Object.DrawModes.XRAY # add bones armature.makeEditable() add_bones_to_armature(transform_tree.root_nodes) armature.update() log( '--Adding armature", "i, x in enumerate(V) if i in S] I = [(S[i], S[j], S[k])", "v in t) mesh.activeUVLayer = 'UVMap' return mesh def add_bones_to_armature(transform_nodes, parent_bone=None): for node", "k in I] if data_group.tex_coords2: T2 = select_data(data_group.tex_coords2) T2 = [(T2[i], T2[j], T2[k])", "log( 'CRES file:', cres_filename ) log( 'Settings:' ) log( '--Import bounding geometry:', settings['import_bmesh']", "t, '->', I[i] ) if T1: uv1, uv2, uv3 = T1[i] T1[i] =", "= filename def set_cres_filename(filename): global cres_filename str_cres_filename.val = filename def event_handler(evt, val): global", "log( '==SKELETON==============================' ) log( transform_tree ) log() try: if settings['remove_doubles']: log( 'Removing doubles...'", "to resource node file (CRES; optional, but recommended)\") Draw.PushButton(\"Select file\", 0x21, 320, pos_y,", "name and flags assert type(group.name) == str obj.addProperty('name', group.name) # Blender does not", "Software is # furnished to do so, subject to the following conditions: #", "'--Adding shape keys...' 
) keys = select_data(data_group.keys) dV = map(select_data, data_group.dVerts) log( '\\x20\\x20--Length", "-> new_index } for i, x in enumerate(sorted(set(chain(*group.indices)))): S[x] = i # map", "'\\x20\\x20--Length of dV: (%i, %i, %i, %i)' % tuple(map(len, dV)) ) # basis", "nodes down to root while node and id(node) not in node_ids: node_ids.add(id(node)) node", "== t[2]: I[i] = (t[2], t[0], t[1]) log( '--Triangle # %i reordered:' %", "# assign texture coords # for f, t in zip(mesh.faces, T1): f.uv =", "arithmetic mean v = [_n.abs_transform.loc for _n in node.child_nodes if (id(_n) in node_ids", "for i, j, k in I] if data_group.tex_coords2: T2 = select_data(data_group.tex_coords2) T2 =", "display_menu('The file has a different set of inverse transforms. Replace?', ['Yes, replace inverse", "= { 'import_bmesh': btn_import_bmesh.val, 'remove_doubles': btn_remove_doubles.val, 'all_bones': btn_all_bones.val, } _save_log = bool(btn_save_log.val) gmdc_filename", "group.name ) data_group = geometry.data_groups[group.data_group_index] # define index mapping S = {} #", "# for i, key in _keys_f: j = key.index(idx) v = dV[j] if", "= True armature.drawType = Blender.Armature.STICK arm_obj = scene.objects.new(armature) # create armature object arm_obj.drawMode", "collection): idx = '#%i'%idx if idx!=None else '' s = name[:30-len(idx)] + idx", "on 3rd position # as well as degenerate triangles (i.e., less than 3", "node.abs_transform.loc # the bone's length must not be 0, otherwise Blender ignores it", "# get active scene scene = bpy.data.scenes.active # # add mesh objects (main", "b = choice_required and choice < 0 return choice def draw_gui(): global str_gmdc_filename,", "settings['remove_doubles']: log( 'Removing doubles...' ) geometry.remove_doubles() log() log( 'Creating objects...' 
) create_objects(geometry, transform_tree,", "MAX_PATH = 200 # frame Blender.BGL.glColor3f(0.75, 0.75, 0.75) Blender.BGL.glRecti(10, 10, 430, pos_y) pos_y-=", "i, j, k in I] mesh.verts.extend(V) mesh.faces.extend(I) name = transform_tree and transform_tree.get_node(idx).name or", "texture coords # for f, t in zip(mesh.faces, T1): f.uv = tuple(BlenderVector(u, 1-v)", "obj.addProperty('name', group.name) # Blender does not like Unicode here obj.addProperty('flags', '%08X' % group.flags)", "in texture coordinates, then they are merged together (removes seams)\") btn_all_bones = Draw.Toggle(\"All", "i, x in enumerate(data) if i in S] V = select_data(data_group.vertices) # texture", "choice_required=False): b = True while b: choice = Draw.PupMenu('%s%%t|'%caption + \"|\".join('%s%%x%i'%(s, i) for", "to root while node and id(node) not in node_ids: node_ids.add(id(node)) node = node.parent", "= len(mesh.verts) for i, x in enumerate(sorted(set(chain(*I)))): S[x] = i+j rot, loc =", "root while node and id(node) not in node_ids: node_ids.add(id(node)) node = node.parent if", "settings['import_bmesh'] ) log( '--Remove doubles: ', settings['remove_doubles'] ) log( '--Import all bones: ',", ") log() try: if settings['remove_doubles']: log( 'Removing doubles...' 
) geometry.remove_doubles() log() log( 'Creating", "Blender.Mesh.New('b_mesh') mesh.verts.extend(V) mesh.faces.extend(I) obj = scene.objects.new(mesh) obj.name = 'b_mesh' if geometry.dynamic_bmesh: log( 'Creating", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "= w[wi] mesh.assignVertsToGroup(v_group_names[j], [i], f, 1) # 1 - Blender.Mesh.AssignModes.REPLACE v_group_names = dd", "pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign() str_cres_filename = Draw.String(\"\", 0x20, 20, pos_y, 300,", "gmdc_filename ) log( 'CRES file:', cres_filename ) log( 'Settings:' ) log( '--Import bounding", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the", "the following conditions: # # The above copyright notice and this permission notice", "transforms. Replace?', ['Yes, replace inverse transforms.', 'No, keep previously loaded inverse transforms.'], choice_required=True)", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #", "2 or 1) if res and res.nodes[0].type == 'cResourceNode': transform_tree = build_transform_tree(res.nodes) else:", "(.gmdc, .5gd)' Blender: 249 Group: 'Import' Tooltip: 'Import TS2 GMDC file' \"\"\" #-------------------------------------------------------------------------------", "[] for i, t in enumerate(I): if 0 == t[2]: I[i] = (t[2],", "different indices): # https://www.blender.org/api/249PythonDoc/Mesh.MFaceSeq-class.html#extend # w = [] for i, t in enumerate(I):", "idx in group.bones: name = transform_tree and transform_tree.get_node(idx).name or 'bone' dd[idx] = name", "None else: T1 = group.tex_coords and group.tex_coords[:] # copy or None T2 =", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "vertices for i, (b, w) in enumerate(zip(B, W)): for wi, j in 
enumerate(b):", "children armature.bones[name] = _bone add_bones_to_armature(node.child_nodes, _bone) ## ## armature, node_ids and bone_set are", "in t) mesh.activeUVLayer = 'UVMap' return mesh def add_bones_to_armature(transform_nodes, parent_bone=None): for node in", "in S] V = select_data(data_group.vertices) # texture coords if data_group.tex_coords: T1 = select_data(data_group.tex_coords)", "to do so, subject to the following conditions: # # The above copyright", "'--Assigning vertices to vertex groups...' ) # map bones B = [tuple(group.bones[j] for", "t[1], enumerate(keys)) if _keys_f: s = '::'.join(s) log( '\\x20\\x20--Key \"%s\"' % s )", "has a different set of inverse transforms. Replace?', ['Yes, replace inverse transforms.', 'No,", "for group in geometry.index_groups))) if settings['all_bones']: node_ids = set(map(id, transform_tree)) else: node_ids =", "S[x] = i # map indices I = [(S[i], S[j], S[k]) for i,", "# shape keys # if data_group.keys: log( '--Adding shape keys...' ) keys =", "1) mesh.calcNormals() v_group_names = None mesh_objects.append(obj) # # load inverse transforms (if any)", "* from itertools import chain import bpy, Blender from Blender import Draw from", "= select_data(data_group.tex_coords2) T2 = [(T2[i], T2[j], T2[k]) for i, j, k in I]", "100, 30, \"Terminate the script (Esc)\") Draw.EndAlign() #--------------------------------------- # event handlers l_ctrl_key_pressed =", "zip(mesh.faces, T2): f.uv = tuple(BlenderVector(u, 1-v) for u, v in t) mesh.activeUVLayer =", "\"Write script's log data into file *.import_log.txt\") Draw.EndAlign() pos_y-= 45 # buttons Draw.BeginAlign()", ") log( '--Number of transform nodes (%i)' % len(node_ids) ) armature = Blender.Armature.New()", "str_cres_filename.val.strip() if not gmdc_filename: display_menu('Error!', ['Select GMDC file.']) return # create log file", "str obj.addProperty('name', group.name) # Blender does not like Unicode here obj.addProperty('flags', '%08X' %", "if display_menu(\"Import 
complete!\", ['Quit']) == 0: Draw.Exit() finally: close_log_file() ######################################## ## GUI ########################################", "20 Draw.BeginAlign() str_gmdc_filename = Draw.String(\"\", 0x10, 20, pos_y, 300, 20, str_gmdc_filename.val, MAX_PATH, \"Path", "enumerate(items)), 0x100) b = choice_required and choice < 0 return choice def draw_gui():", "t) # Direct3D -> OpenGL if T2: mesh.addUVLayer('UVMap2') mesh.activeUVLayer = 'UVMap2' for f,", "t[1]) log( '--Triangle # %i reordered:' % i, t, '->', I[i] ) if", "rot, loc = geometry.inverse_transforms[idx] t = Transform(loc, rot).get_inverse() V = [t.transformPoint(Vector(*x)).to_tuple() for i,", "in I] mesh.verts.extend(V) mesh.faces.extend(I) name = transform_tree and transform_tree.get_node(idx).name or 'bone' name =", "inverse transforms. Replace?', ['Yes, replace inverse transforms.', 'No, keep previously loaded inverse transforms.'],", "BlenderVector(node.abs_transform.loc.to_tuple()) # compute tail pos as arithmetic mean v = [_n.abs_transform.loc for _n", "S[j], S[k]) for i, j, k in I] mesh.verts.extend(V) mesh.faces.extend(I) name = transform_tree", "Draw.Create(\"\") str_cres_filename = Draw.Create(\"\") btn_import_bmesh = Draw.Create(0) btn_remove_doubles = Draw.Create(1) btn_all_bones = Draw.Create(0)", "bone's length must not be 0, otherwise Blender ignores it if (node.abs_transform.loc-v).len() <", "geometry.dynamic_bmesh: log( 'Creating dynamic bounding mesh...' ) mesh = Blender.Mesh.New('b_mesh') obj = scene.objects.new(mesh)", "in bone_set)] v = sum(v, Vector())*(1./len(v)) if (v and node.bone_index in bone_set) else", "OR OTHER DEALINGS IN # THE SOFTWARE. 
#------------------------------------------------------------------------------- from gmdc_tools import * from", "'bone' dd[idx] = name = make_unique_bone_name(name, idx, dd.values()) # add vertex group mesh.addVertGroup(name)", "_save_log = bool(btn_save_log.val) gmdc_filename = str_gmdc_filename.val.strip() cres_filename = str_cres_filename.val.strip() if not gmdc_filename: display_menu('Error!',", "geometry.remove_doubles() log() log( 'Creating objects...' ) create_objects(geometry, transform_tree, settings) except: print_last_exception() display_menu('Error!', ['An", "; MAX_PATH = 200 # frame Blender.BGL.glColor3f(0.75, 0.75, 0.75) Blender.BGL.glRecti(10, 10, 430, pos_y)", "wi == 3: f = 1.0 - sum(w) else: f = w[wi] mesh.assignVertsToGroup(v_group_names[j],", "data_group.tex_coords2: T2 = select_data(data_group.tex_coords2) T2 = [(T2[i], T2[j], T2[k]) for i, j, k", "def select_data(data): return [x for i, x in enumerate(data) if i in S]", "= Blender.Armature.New() armature.envelopes = False armature.vertexGroups = True armature.drawType = Blender.Armature.STICK arm_obj =", "file!' ) close_log_file() display_menu('Error!', ['Could not load geometry file. See log for details.'])", "without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense,", "I] if data_group.tex_coords2: T2 = select_data(data_group.tex_coords2) T2 = [(T2[i], T2[j], T2[k]) for i,", "GMDC file \"%s\"...' % gmdc_filename ) try: res = load_resource(gmdc_filename, _save_log and 2", "!= 'cGeometryDataContainer': res and error( 'Not a GMDC file!' 
) close_log_file() display_menu('Error!', ['Could", "Draw.BeginAlign() Draw.PushButton(\"Import\", 1, 120, pos_y, 100, 30, \"Import geometry (Ctrl + Enter)\") Draw.PushButton(\"Exit\",", "in group.bones: name = transform_tree and transform_tree.get_node(idx).name or 'bone' dd[idx] = name =", "mesh.calcNormals() v_group_names = None mesh_objects.append(obj) # # load inverse transforms (if any) #", "# # begin import # log( '==Geometry Data Container Importer======' ) log( 'GMDC", "with zero-index vertex on 3rd position # as well as degenerate triangles (i.e.,", "if evt == 0: Draw.Exit() elif evt == 1: begin_import() elif evt ==", "{} # { old_index -> new_index } j = len(mesh.verts) for i, x", "S] V = select_data(data_group.vertices) # texture coords if data_group.tex_coords: T1 = select_data(data_group.tex_coords) T1", "= make_unique_bone_name(name, idx, dd.values()) # add vertex group mesh.addVertGroup(name) v_group_names = [dd.get(j) for", "armature = Blender.Armature.New() armature.envelopes = False armature.vertexGroups = True armature.drawType = Blender.Armature.STICK arm_obj", "node = transform_tree.get_node(j) assert not isinstance(node, tuple) # include all nodes down to", "permission notice shall be included in # all copies or substantial portions of", "'no' ) # rigging # if data_group.bones: B = select_data(data_group.bones) W = select_data(data_group.weights)", "in enumerate(sorted(set(chain(*I)))): S[x] = i+j rot, loc = geometry.inverse_transforms[idx] t = Transform(loc, rot).get_inverse()", "# # add bounding geometry # if settings['import_bmesh']: if geometry.static_bmesh: log( 'Creating static", "# modify mesh with dV # for i, key in _keys_f: j =", "res.nodes[0].type == 'cResourceNode': transform_tree = build_transform_tree(res.nodes) else: res and error( 'Not a CRES", "Draw.EndAlign() pos_y-= 30 # resource node file selector Draw.Label(\"Resource node file (optional)\", 20,", "choice_required and choice < 0 return choice def draw_gui(): global 
str_gmdc_filename, str_cres_filename, btn_import_bmesh,", ") if T1: uv1, uv2, uv3 = T1[i] T1[i] = (uv3, uv1, uv2)", "== str obj.addProperty('name', group.name) # Blender does not like Unicode here obj.addProperty('flags', '%08X'", "# load geometry log( 'Opening GMDC file \"%s\"...' % gmdc_filename ) try: res", "into file *.import_log.txt\") Draw.EndAlign() pos_y-= 45 # buttons Draw.BeginAlign() Draw.PushButton(\"Import\", 1, 120, pos_y,", "pos_y, 100, 20, \"Open file browser\") Draw.EndAlign() pos_y-= 30 # resource node file", "V = [t.transformPoint(Vector(*x)).to_tuple() for i, x in enumerate(V) if i in S] I", "(removes seams)\") btn_all_bones = Draw.Toggle(\"All bones\", 0x33, 220, pos_y, 100, 20, btn_all_bones.val, \"Import", "True armature.drawType = Blender.Armature.STICK arm_obj = scene.objects.new(armature) # create armature object arm_obj.drawMode |=", "buttons Draw.BeginAlign() Draw.PushButton(\"Import\", 1, 120, pos_y, 100, 30, \"Import geometry (Ctrl + Enter)\")", "= transform_tree and transform_tree.get_node(idx).name or 'bone' dd[idx] = name = make_unique_bone_name(name, idx, dd.values())", "log( 'Opening CRES file \"%s\"...' % cres_filename ) try: res = load_resource(cres_filename, _save_log", "transform_tree: bone_set = set(chain(*(group.bones or [] for group in geometry.index_groups))) if settings['all_bones']: node_ids", "Draw.PupMenu('%s%%t|'%caption + \"|\".join('%s%%x%i'%(s, i) for i, s in enumerate(items)), 0x100) b = choice_required", "for writing.']) return # Ok set_log_file(f) # # begin import # log( '==Geometry", "Group: 'Import' Tooltip: 'Import TS2 GMDC file' \"\"\" #------------------------------------------------------------------------------- # Copyright (C) 2016", "for details.']) else: # Ok log( 'Finished!' ) Blender.Redraw() # exit prompt if", "mesh = create_mesh(group.name, V, I, T1, T2) obj = scene.objects.new(mesh) obj.name = group.name", "log( 'Opening log file \"%s\" for writing... 
' % s ) try: f", "2 or 1) except: print_last_exception() res = False if not res or res.nodes[0].type", "= 230 ; MAX_PATH = 200 # frame Blender.BGL.glColor3f(0.75, 0.75, 0.75) Blender.BGL.glRecti(10, 10,", "0: raise Exception() except: log( 'Saving inverse transforms in scene.properties[\"gmdc_inverse_transforms\"]' ) scene.properties['gmdc_inverse_transforms'] =", "= dd = None # shape keys # if data_group.keys: log( '--Adding shape", "# save reference to current object log( '--Rigging:', data_group.bones and 'yes' or 'no'", "S.values(), 1.0, 1) mesh.calcNormals() v_group_names = None mesh_objects.append(obj) # # load inverse transforms", "v = tuple(chain(*chain(*geometry.inverse_transforms))) try: w = tuple(scene.properties['gmdc_inverse_transforms']) log( 'Scene already has inverse transforms", "# add bones armature.makeEditable() add_bones_to_armature(transform_tree.root_nodes) armature.update() log( '--Adding armature modifier(s)...' ) # assign", "file (if needed) if _save_log: s = gmdc_filename + '.import_log.txt' log( 'Opening log", "DEALINGS IN # THE SOFTWARE. 
#------------------------------------------------------------------------------- from gmdc_tools import * from itertools import", "S] I = [(S[i], S[j], S[k]) for i, j, k in I] mesh.verts.extend(V)", "armature.bones.keys()) # add bone and its children armature.bones[name] = _bone add_bones_to_armature(node.child_nodes, _bone) ##", "ignores it if (node.abs_transform.loc-v).len() < 0.025: v = node.abs_transform.loc + node.abs_transform.rot.get_matrix().col(2)*0.05 _bone.tail =", "uv2, uv3 = T2[i] T2[i] = (uv3, uv1, uv2) if len(set(t)) < 3:", "' % s ) try: f = open(s, 'w') except IOError as e:", "MAX_PATH, \"Path to resource node file (CRES; optional, but recommended)\") Draw.PushButton(\"Select file\", 0x21,", "100, 20, \"Open file browser\") Draw.EndAlign() pos_y-= 30 # resource node file selector", "shall be included in # all copies or substantial portions of the Software.", "Blender does not like triangles with zero-index vertex on 3rd position # as", "to current object log( '--Rigging:', data_group.bones and 'yes' or 'no' ) # rigging", "if id(node) in node_ids: _bone = Blender.Armature.Editbone() _bone.head = BlenderVector(node.abs_transform.loc.to_tuple()) # compute tail", "S[k]) for i, j, k in I] mesh.verts.extend(V) mesh.faces.extend(I) name = transform_tree and", "Draw.Create(\"\") btn_import_bmesh = Draw.Create(0) btn_remove_doubles = Draw.Create(1) btn_all_bones = Draw.Create(0) btn_save_log = Draw.Create(0)", "Draw.Label(s, 20, pos_y, 400, 30) pos_y-= 30 # GMDC file selector Draw.Label(\"GMDC file\",", "of transform nodes (%i)' % len(node_ids) ) armature = Blender.Armature.New() armature.envelopes = False", "= Blender.Armature.STICK arm_obj = scene.objects.new(armature) # create armature object arm_obj.drawMode |= Blender.Object.DrawModes.XRAY #", "Blender.Window.FileSelector(set_cres_filename, 'Select') #------------------------------------------------------------------------------- # set default values for gui elements and run event", "texture 
coordinates, then they are merged together (removes seams)\") btn_all_bones = Draw.Toggle(\"All bones\",", "len(mesh.verts) for i, x in enumerate(sorted(set(chain(*I)))): S[x] = i+j rot, loc = geometry.inverse_transforms[idx]", "modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and", "ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "SOFTWARE. #------------------------------------------------------------------------------- from gmdc_tools import * from itertools import chain import bpy, Blender", "pos_y-= 20 Draw.BeginAlign() str_cres_filename = Draw.String(\"\", 0x20, 20, pos_y, 300, 20, str_cres_filename.val, MAX_PATH,", "= False if not res or res.nodes[0].type != 'cGeometryDataContainer': res and error( 'Not", "= name[:30-len(idx)] + idx # max - 31 characters (?) i = 1", "l_ctrl_key_pressed, r_ctrl_key_pressed if evt == Draw.ESCKEY and val: Draw.Exit() elif evt == Draw.", "None # shape keys # if data_group.keys: log( '--Adding shape keys...' ) keys", "file!' ) except: print_last_exception() if not transform_tree: close_log_file() display_menu('Error!', ['Could not load resource", "publish, distribute, sublicense, and/or sell # copies of the Software, and to permit", ") for i in reversed(w): del I[i] if T1: del T1[i] if T2:", "flags assert type(group.name) == str obj.addProperty('name', group.name) # Blender does not like Unicode", "basis #<- groups # # add bounding geometry # if settings['import_bmesh']: if geometry.static_bmesh:", "all nodes down to root while node and id(node) not in node_ids: node_ids.add(id(node))", "groups...' ) # map bones B = [tuple(group.bones[j] for j in b) for", "transform_tree ) log() try: if settings['remove_doubles']: log( 'Removing doubles...' 
) geometry.remove_doubles() log() log(", "button_events(evt): if evt == 0: Draw.Exit() elif evt == 1: begin_import() elif evt", "group.name # max - 21 characters # save original name and flags assert", "not res or res.nodes[0].type != 'cGeometryDataContainer': res and error( 'Not a GMDC file!'", "400, 30) pos_y-= 30 # GMDC file selector Draw.Label(\"GMDC file\", 20, pos_y, 200,", "import # log( '==Geometry Data Container Importer======' ) log( 'GMDC file:', gmdc_filename )", "i, key in _keys_f: j = key.index(idx) v = dV[j] if v: block_verts[i]+=", "armature modifier(s)...' ) # assign armature modifier # for obj in mesh_objects: modifier", "file for writing.']) return # Ok set_log_file(f) # # begin import # log(", "mesh.addUVLayer('UVMap') # assign texture coords # for f, t in zip(mesh.faces, T1): f.uv", "20 Draw.BeginAlign() str_cres_filename = Draw.String(\"\", 0x20, 20, pos_y, 300, 20, str_cres_filename.val, MAX_PATH, \"Path", "30, \"Terminate the script (Esc)\") Draw.EndAlign() #--------------------------------------- # event handlers l_ctrl_key_pressed = 0", "smooth=True) # since Blender recalculates normals, setting original normals is useless # instead,", "plugin's header s = \"GMDC Importer (TS2)\" Blender.BGL.glColor3f(0.8, 0.8, 0.8) Blender.BGL.glRecti(10, pos_y, 430,", "resource node file selector Draw.Label(\"Resource node file (optional)\", 20, pos_y, 200, 20) pos_y-=", "in t[1], enumerate(keys)) if _keys_f: s = '::'.join(s) log( '\\x20\\x20--Key \"%s\"' % s", "= tuple(BlenderVector(u, 1-v) for u, v in t) mesh.activeUVLayer = 'UVMap' return mesh", "str_gmdc_filename.val, MAX_PATH, \"Path to GMDC file\") Draw.PushButton(\"Select file\", 0x11, 320, pos_y, 100, 20,", "in enumerate(b): if wi == 3: f = 1.0 - sum(w) else: f", "transforms (%i) stored in scene.properties[\"gmdc_inverse_transforms\"]' % (len(w)/7) ) if v != w and", "120, pos_y, 100, 30, \"Import geometry (Ctrl + Enter)\") Draw.PushButton(\"Exit\", 0, 220, pos_y,", "True while b: choice = 
Draw.PupMenu('%s%%t|'%caption + \"|\".join('%s%%x%i'%(s, i) for i, s in", "group.tex_coords2[:] # also, Blender does not like triangles with zero-index vertex on 3rd", "coords # for f, t in zip(mesh.faces, T1): f.uv = tuple(BlenderVector(u, 1-v) for", "# mesh = Blender.Mesh.New(name) mesh.verts.extend(V) mesh.faces.extend(I, ignoreDups=True, smooth=True) # since Blender recalculates normals,", "_bone.head = BlenderVector(node.abs_transform.loc.to_tuple()) # compute tail pos as arithmetic mean v = [_n.abs_transform.loc", "# plugin's header s = \"GMDC Importer (TS2)\" Blender.BGL.glColor3f(0.8, 0.8, 0.8) Blender.BGL.glRecti(10, pos_y,", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR", "set_cres_filename(filename): global cres_filename str_cres_filename.val = filename def event_handler(evt, val): global l_ctrl_key_pressed, r_ctrl_key_pressed if", "enumerate(geometry.morph_names): _keys_f = filter(lambda t: idx in t[1], enumerate(keys)) if _keys_f: s =", "try: w = tuple(scene.properties['gmdc_inverse_transforms']) log( 'Scene already has inverse transforms (%i) stored in", "j, k in I] mesh.verts.extend(V) mesh.faces.extend(I) name = transform_tree and transform_tree.get_node(idx).name or 'bone'", "Replace?', ['Yes, replace inverse transforms.', 'No, keep previously loaded inverse transforms.'], choice_required=True) ==", ") # create mesh and add it to the scene mesh = create_mesh(group.name,", "'yes' or 'no' ) # rigging # if data_group.bones: B = select_data(data_group.bones) W", "enumerate(keys)) if _keys_f: s = '::'.join(s) log( '\\x20\\x20--Key \"%s\"' % s ) obj.insertShapeKey()", "not like Unicode here obj.addProperty('flags', '%08X' % group.flags) mesh_objects.append(obj) # save reference to", "and (l_ctrl_key_pressed or r_ctrl_key_pressed): begin_import() l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0 def button_events(evt):", "Blender.Mesh.AssignModes.REPLACE v_group_names = dd = None # shape keys # if data_group.keys: log(", 
"btn_remove_doubles, btn_save_log pos_y = 230 ; MAX_PATH = 200 # frame Blender.BGL.glColor3f(0.75, 0.75,", "log( '--Adding shape keys...' ) keys = select_data(data_group.keys) dV = map(select_data, data_group.dVerts) log(", "bone_set = set(chain(*(group.bones or [] for group in geometry.index_groups))) if settings['all_bones']: node_ids =", "[(T1[i], T1[j], T1[k]) for i, j, k in I] if data_group.tex_coords2: T2 =", "begin_import() elif evt == 0x11: Blender.Window.FileSelector(set_gmdc_filename, 'Select') elif evt == 0x21: Blender.Window.FileSelector(set_cres_filename, 'Select')", "def create_mesh(name, V, I, T1, T2): # create mesh # mesh = Blender.Mesh.New(name)", "group in geometry.index_groups))) if settings['all_bones']: node_ids = set(map(id, transform_tree)) else: node_ids = set()", "# in the Software without restriction, including without limitation the rights # to", "I] else: T2 = None else: T1 = group.tex_coords and group.tex_coords[:] # copy", "\"%s\" for writing... ' % s ) try: f = open(s, 'w') except", "100, 20, \"Open file browser\") Draw.EndAlign() pos_y-= 35 # options Draw.BeginAlign() btn_import_bmesh =", "raise Exception() except: log( 'Saving inverse transforms in scene.properties[\"gmdc_inverse_transforms\"]' ) scene.properties['gmdc_inverse_transforms'] = v", "= part S = {} # { old_index -> new_index } j =", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "430, pos_y) pos_y-= 30 # plugin's header s = \"GMDC Importer (TS2)\" Blender.BGL.glColor3f(0.8,", "< 3: w.append(i) log( '--Triangle # %i' % i, t, 'removed' ) for", "import Draw from Blender.Mathutils import Vector as BlenderVector ######################################## ## Importer ######################################## def", "20, btn_import_bmesh.val, \"Import bounding geometry\") btn_remove_doubles = Draw.Toggle(\"Rm. 
doubles\", 0x32, 120, pos_y, 100,", "differ only in texture coordinates, then they are merged together (removes seams)\") btn_all_bones", "select_data(data_group.vertices) # texture coords if data_group.tex_coords: T1 = select_data(data_group.tex_coords) T1 = [(T1[i], T1[j],", "dV = map(select_data, data_group.dVerts) log( '\\x20\\x20--Length of dV: (%i, %i, %i, %i)' %", "bones armature.makeEditable() add_bones_to_armature(transform_tree.root_nodes) armature.update() log( '--Adding armature modifier(s)...' ) # assign armature modifier", "for i, key in _keys_f: j = key.index(idx) v = dV[j] if v:", "enumerate(I): if 0 == t[2]: I[i] = (t[2], t[0], t[1]) log( '--Triangle #", "mesh = Blender.Mesh.New('b_mesh') mesh.verts.extend(V) mesh.faces.extend(I) obj = scene.objects.new(mesh) obj.name = 'b_mesh' if geometry.dynamic_bmesh:", "ignoreDups=True, smooth=True) # since Blender recalculates normals, setting original normals is useless #", "choice = Draw.PupMenu('%s%%t|'%caption + \"|\".join('%s%%x%i'%(s, i) for i, s in enumerate(items)), 0x100) b", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE", "like Unicode here obj.addProperty('flags', '%08X' % group.flags) mesh_objects.append(obj) # save reference to current", "a GMDC file!' ) close_log_file() display_menu('Error!', ['Could not load geometry file. See log", "= None if cres_filename: # load skeleton log( 'Opening CRES file \"%s\"...' 
%", "load_resource(cres_filename, _save_log and 2 or 1) if res and res.nodes[0].type == 'cResourceNode': transform_tree", "mesh = Blender.Mesh.New(name) mesh.verts.extend(V) mesh.faces.extend(I, ignoreDups=True, smooth=True) # since Blender recalculates normals, setting", "assert not isinstance(node, tuple) # include all nodes down to root while node", "#<- groups # # add bounding geometry # if settings['import_bmesh']: if geometry.static_bmesh: log(", "transform nodes (%i)' % len(node_ids) ) armature = Blender.Armature.New() armature.envelopes = False armature.vertexGroups", "= set(map(id, transform_tree)) else: node_ids = set() for j in bone_set: node =", "or res.nodes[0].type != 'cGeometryDataContainer': res and error( 'Not a GMDC file!' ) close_log_file()", "= transform_tree and transform_tree.get_node(idx).name or 'bone' name = make_unique_bone_name(name, idx, v_group_names) v_group_names.add(name) mesh.addVertGroup(name)", "if data_group.tex_coords2: T2 = select_data(data_group.tex_coords2) T2 = [(T2[i], T2[j], T2[k]) for i, j,", "https://www.blender.org/api/249PythonDoc/Mesh.MFaceSeq-class.html#extend # w = [] for i, t in enumerate(I): if 0 ==", "3: f = 1.0 - sum(w) else: f = w[wi] mesh.assignVertsToGroup(v_group_names[j], [i], f,", "in bone_set: node = transform_tree.get_node(j) assert not isinstance(node, tuple) # include all nodes", "for b in B] dd = dict() # { index -> unique_bone_name }", "+ node.abs_transform.rot.get_matrix().col(2)*0.05 _bone.tail = BlenderVector(v.to_tuple()) if parent_bone: _bone.parent = parent_bone name = make_unique_bone_name(node.name,", "return # Ok set_log_file(f) # # begin import # log( '==Geometry Data Container", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO", "= load_resource(gmdc_filename, _save_log and 2 or 1) except: print_last_exception() res = False if", "end def begin_import(): settings = { 'import_bmesh': btn_import_bmesh.val, 'remove_doubles': btn_remove_doubles.val, 'all_bones': btn_all_bones.val, }", "transform_tree, settings): #--------------------------------------- # subroutines def create_mesh(name, V, I, T1, T2): # create", "exit prompt if display_menu(\"Import complete!\", ['Quit']) == 0: Draw.Exit() finally: close_log_file() ######################################## ##", ") if v != w and display_menu('The file has a different set of", "of dV: (%i, %i, %i, %i)' % tuple(map(len, dV)) ) # basis obj.insertShapeKey()", "(optional)\", 20, pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign() str_cres_filename = Draw.String(\"\", 0x20, 20,", "# map indices I = [(S[i], S[j], S[k]) for i, j, k in", "= [dd.get(j) for j in xrange(max(dd)+1)] # assign vertices for i, (b, w)", "modifier[Blender.Modifier.Settings.ENVELOPES] = False # not envelopes modifier[Blender.Modifier.Settings.OBJECT ] = arm_obj scene.update() #<- end", "k in I] mesh.verts.extend(V) mesh.faces.extend(I) name = transform_tree and transform_tree.get_node(idx).name or 'bone' name", "if cres_filename: # load skeleton log( 'Opening CRES file \"%s\"...' % cres_filename )", ") # rigging # if data_group.bones: B = select_data(data_group.bones) W = select_data(data_group.weights) log(", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "idx, collection): idx = '#%i'%idx if idx!=None else '' s = name[:30-len(idx)] +", "set of inverse transforms. Replace?', ['Yes, replace inverse transforms.', 'No, keep previously loaded", "} for idx in group.bones: name = transform_tree and transform_tree.get_node(idx).name or 'bone' dd[idx]", "copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED", "event handlers l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0 def set_gmdc_filename(filename): global gmdc_filename str_gmdc_filename.val", "str_gmdc_filename = Draw.String(\"\", 0x10, 20, pos_y, 300, 20, str_gmdc_filename.val, MAX_PATH, \"Path to GMDC", "inverse transforms (if any) # if geometry.inverse_transforms: v = tuple(chain(*chain(*geometry.inverse_transforms))) try: w =", "'Opening log file \"%s\" for writing... ' % s ) try: f =", "unique_bone_name } for idx in group.bones: name = transform_tree and transform_tree.get_node(idx).name or 'bone'", "elif evt == 0x11: Blender.Window.FileSelector(set_gmdc_filename, 'Select') elif evt == 0x21: Blender.Window.FileSelector(set_cres_filename, 'Select') #-------------------------------------------------------------------------------", "'--Triangle # %i reordered:' % i, t, '->', I[i] ) if T1: uv1,", "transform_tree.get_node(j) assert not isinstance(node, tuple) # include all nodes down to root while", "finally: close_log_file() ######################################## ## GUI ######################################## def display_menu(caption, items, choice_required=False): b = True", "res = False if not res or res.nodes[0].type != 'cGeometryDataContainer': res and error(", "Permission is hereby granted, free of charge, to any person obtaining a copy", "B] dd = dict() # { index -> unique_bone_name } for idx in", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "Software without restriction, including without limitation the rights # to use, copy, modify,", "if _save_log: log( '==SKELETON==============================' ) log( transform_tree ) log() try: if settings['remove_doubles']: log(", "= [(S[i], S[j], S[k]) for i, j, k in group.indices] # filtering function", "length must not be 0, otherwise Blender ignores it if (node.abs_transform.loc-v).len() < 0.025:", "geometry.static_bmesh mesh = Blender.Mesh.New('b_mesh') mesh.verts.extend(V) mesh.faces.extend(I) obj 
= scene.objects.new(mesh) obj.name = 'b_mesh' if", "230 ; MAX_PATH = 200 # frame Blender.BGL.glColor3f(0.75, 0.75, 0.75) Blender.BGL.glRecti(10, 10, 430,", "pos_y+30) Draw.Label(s, 20, pos_y, 400, 30) pos_y-= 30 # GMDC file selector Draw.Label(\"GMDC", "= 0 def button_events(evt): if evt == 0: Draw.Exit() elif evt == 1:", "# The above copyright notice and this permission notice shall be included in", "= BlenderVector(node.abs_transform.loc.to_tuple()) # compute tail pos as arithmetic mean v = [_n.abs_transform.loc for", "# of this software and associated documentation files (the \"Software\"), to deal #", "geometry # if settings['import_bmesh']: if geometry.static_bmesh: log( 'Creating static bounding mesh...' ) V,", "len(set(t)) < 3: w.append(i) log( '--Triangle # %i' % i, t, 'removed' )", "idx # max - 31 characters (?) i = 1 while s in", "object (vertices: %i, triangles: %i)...' % (len(V), len(I)) ) # create mesh and", "substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "= geometry.data_groups[group.data_group_index] # define index mapping S = {} # { old_index ->", "if len(set(t)) < 3: w.append(i) log( '--Triangle # %i' % i, t, 'removed'", "'cResourceNode': transform_tree = build_transform_tree(res.nodes) else: res and error( 'Not a CRES file!' )", "20, pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign() str_cres_filename = Draw.String(\"\", 0x20, 20, pos_y,", "btn_remove_doubles = Draw.Toggle(\"Rm. 
doubles\", 0x32, 120, pos_y, 100, 20, btn_remove_doubles.val, \"If some vertices", "-> new_index } j = len(mesh.verts) for i, x in enumerate(sorted(set(chain(*I)))): S[x] =", "['Could not open log file for writing.']) return # Ok set_log_file(f) # #", "in t) # Direct3D -> OpenGL if T2: mesh.addUVLayer('UVMap2') mesh.activeUVLayer = 'UVMap2' for", "(id(_n) in node_ids and _n.bone_index in bone_set)] v = sum(v, Vector())*(1./len(v)) if (v", "restriction, including without limitation the rights # to use, copy, modify, merge, publish,", "pos_y-= 35 # options Draw.BeginAlign() btn_import_bmesh = Draw.Toggle(\"Bound. mesh\", 0x31, 20, pos_y, 100,", "str_cres_filename.val = filename def event_handler(evt, val): global l_ctrl_key_pressed, r_ctrl_key_pressed if evt == Draw.ESCKEY", "f.uv = tuple(BlenderVector(u, 1-v) for u, v in t) # Direct3D -> OpenGL", "modifier[Blender.Modifier.Settings.OBJECT ] = arm_obj scene.update() #<- end def begin_import(): settings = { 'import_bmesh':", "l_ctrl_key_pressed = val elif evt == Draw.RIGHTCTRLKEY: r_ctrl_key_pressed = val elif evt ==", "{ 'import_bmesh': btn_import_bmesh.val, 'remove_doubles': btn_remove_doubles.val, 'all_bones': btn_all_bones.val, } _save_log = bool(btn_save_log.val) gmdc_filename =", "u, v in t) # Direct3D -> OpenGL if T2: mesh.addUVLayer('UVMap2') mesh.activeUVLayer =", "modifier[Blender.Modifier.Settings.VGROUPS ] = True # use vertex groups modifier[Blender.Modifier.Settings.ENVELOPES] = False # not", "'Scene already has inverse transforms (%i) stored in scene.properties[\"gmdc_inverse_transforms\"]' % (len(w)/7) ) if", "while s in collection: s = '.%i'%i + idx s = name[:30-len(s)] +", ") obj.insertShapeKey() mesh.key.blocks[-1].name = s # set name block_verts = mesh.key.blocks[-1].data # modify", "_bone.parent = parent_bone name = make_unique_bone_name(node.name, node.bone_index, armature.bones.keys()) # add bone and its", "s = '::'.join(s) log( '\\x20\\x20--Key \"%s\"' % s ) obj.insertShapeKey() 
mesh.key.blocks[-1].name = s", "uv1, uv2) if len(set(t)) < 3: w.append(i) log( '--Triangle # %i' % i,", "select_data(data_group.weights) log( '--Assigning vertices to vertex groups...' ) # map bones B =", "def draw_gui(): global str_gmdc_filename, str_cres_filename, btn_import_bmesh, btn_all_bones, btn_remove_doubles, btn_save_log pos_y = 230 ;", "= 200 # frame Blender.BGL.glColor3f(0.75, 0.75, 0.75) Blender.BGL.glRecti(10, 10, 430, pos_y) pos_y-= 30", "if (v and node.bone_index in bone_set) else node.abs_transform.loc # the bone's length must", "= [(T1[i], T1[j], T1[k]) for i, j, k in I] if data_group.tex_coords2: T2", "display_menu('Error!', ['Could not open log file for writing.']) return # Ok set_log_file(f) #", "original name and flags assert type(group.name) == str obj.addProperty('name', group.name) # Blender does", "Transform(loc, rot).get_inverse() V = [t.transformPoint(Vector(*x)).to_tuple() for i, x in enumerate(V) if i in", "calculate normals mesh.calcNormals() if T1: mesh.addUVLayer('UVMap') # assign texture coords # for f,", "any) # if transform_tree: bone_set = set(chain(*(group.bones or [] for group in geometry.index_groups)))", "= 1.0 - sum(w) else: f = w[wi] mesh.assignVertsToGroup(v_group_names[j], [i], f, 1) #", "t, 'removed' ) for i in reversed(w): del I[i] if T1: del T1[i]", "doubles\", 0x32, 120, pos_y, 100, 20, btn_remove_doubles.val, \"If some vertices differ only in", "= select_data(data_group.bones) W = select_data(data_group.weights) log( '--Assigning vertices to vertex groups...' ) #", "geometry log( 'Opening GMDC file \"%s\"...' 
% gmdc_filename ) try: res = load_resource(gmdc_filename,", "Draw.PushButton(\"Exit\", 0, 220, pos_y, 100, 30, \"Terminate the script (Esc)\") Draw.EndAlign() #--------------------------------------- #", "v_group_names = set() for idx, part in enumerate(geometry.dynamic_bmesh): if part: V, I =", "= i+j rot, loc = geometry.inverse_transforms[idx] t = Transform(loc, rot).get_inverse() V = [t.transformPoint(Vector(*x)).to_tuple()", "in enumerate(geometry.dynamic_bmesh): if part: V, I = part S = {} # {", "w = [] for i, t in enumerate(I): if 0 == t[2]: I[i]", "Blender.Object.DrawModes.XRAY # add bones armature.makeEditable() add_bones_to_armature(transform_tree.root_nodes) armature.update() log( '--Adding armature modifier(s)...' ) #", "else '' s = name[:30-len(idx)] + idx # max - 31 characters (?)", "if data_group.bones: B = select_data(data_group.bones) W = select_data(data_group.weights) log( '--Assigning vertices to vertex", "['Quit']) == 0: Draw.Exit() finally: close_log_file() ######################################## ## GUI ######################################## def display_menu(caption, items,", "= tuple(BlenderVector(u, 1-v) for u, v in t) # Direct3D -> OpenGL if", "texture coords if data_group.tex_coords: T1 = select_data(data_group.tex_coords) T1 = [(T1[i], T1[j], T1[k]) for", "in enumerate(items)), 0x100) b = choice_required and choice < 0 return choice def", "0, 220, pos_y, 100, 30, \"Terminate the script (Esc)\") Draw.EndAlign() #--------------------------------------- # event", "inverse transforms.', 'No, keep previously loaded inverse transforms.'], choice_required=True) == 0: raise Exception()", "print_last_exception() if not transform_tree: close_log_file() display_menu('Error!', ['Could not load resource node file. 
See", "and 'yes' or 'no' ) # rigging # if data_group.bones: B = select_data(data_group.bones)", "transform_tree and transform_tree.get_node(idx).name or 'bone' dd[idx] = name = make_unique_bone_name(name, idx, dd.values()) #", "\"|\".join('%s%%x%i'%(s, i) for i, s in enumerate(items)), 0x100) b = choice_required and choice", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "add mesh objects (main geometry) # mesh_objects = [] for group in geometry.index_groups:", "node.bone_index, armature.bones.keys()) # add bone and its children armature.bones[name] = _bone add_bones_to_armature(node.child_nodes, _bone)", "also, Blender does not like triangles with zero-index vertex on 3rd position #", ") # basis obj.insertShapeKey() for idx, s in enumerate(geometry.morph_names): _keys_f = filter(lambda t:", "set() for idx, part in enumerate(geometry.dynamic_bmesh): if part: V, I = part S", "doubles...' ) geometry.remove_doubles() log() log( 'Creating objects...' ) create_objects(geometry, transform_tree, settings) except: print_last_exception()", "['Could not load resource node file. See log for details.']) return log() if", "Draw.Create(0) btn_remove_doubles = Draw.Create(1) btn_all_bones = Draw.Create(0) btn_save_log = Draw.Create(0) Draw.Register(draw_gui, event_handler, button_events)", "if i in S] V = select_data(data_group.vertices) # texture coords if data_group.tex_coords: T1", "uv3 = T1[i] T1[i] = (uv3, uv1, uv2) if T2: uv1, uv2, uv3", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "I, T1, T2): # create mesh # mesh = Blender.Mesh.New(name) mesh.verts.extend(V) mesh.faces.extend(I, ignoreDups=True,", "\"%s\"...' 
% cres_filename ) try: res = load_resource(cres_filename, _save_log and 2 or 1)", "'Select') elif evt == 0x21: Blender.Window.FileSelector(set_cres_filename, 'Select') #------------------------------------------------------------------------------- # set default values for", "if node_ids: log( 'Creating armature...' ) log( '--Number of transform nodes (%i)' %", "False # not envelopes modifier[Blender.Modifier.Settings.OBJECT ] = arm_obj scene.update() #<- end def begin_import():", "= node.parent if node_ids: log( 'Creating armature...' ) log( '--Number of transform nodes", "open log file for writing.']) return # Ok set_log_file(f) # # begin import", "b = True while b: choice = Draw.PupMenu('%s%%t|'%caption + \"|\".join('%s%%x%i'%(s, i) for i,", "associated documentation files (the \"Software\"), to deal # in the Software without restriction,", "# create log file (if needed) if _save_log: s = gmdc_filename + '.import_log.txt'", "= {} # { old_index -> new_index } for i, x in enumerate(sorted(set(chain(*group.indices)))):", "'' s = name[:30-len(idx)] + idx # max - 31 characters (?) i", "function def select_data(data): return [x for i, x in enumerate(data) if i in", "for _n in node.child_nodes if (id(_n) in node_ids and _n.bone_index in bone_set)] v", "of this software and associated documentation files (the \"Software\"), to deal # in", "See log for details.']) return log() if _save_log: log( '==SKELETON==============================' ) log( transform_tree", "+ Enter)\") Draw.PushButton(\"Exit\", 0, 220, pos_y, 100, 30, \"Terminate the script (Esc)\") Draw.EndAlign()", "'cGeometryDataContainer': res and error( 'Not a GMDC file!' 
) close_log_file() display_menu('Error!', ['Could not", "id(node) in node_ids: _bone = Blender.Armature.Editbone() _bone.head = BlenderVector(node.abs_transform.loc.to_tuple()) # compute tail pos", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "return # create log file (if needed) if _save_log: s = gmdc_filename +", "T1: del T1[i] if T2: del T2[i] w = None log( '--Creating mesh", "i = 1 while s in collection: s = '.%i'%i + idx s", "log for details.']) return log() if _save_log: log( '==SKELETON==============================' ) log( transform_tree )", "# if data_group.bones: B = select_data(data_group.bones) W = select_data(data_group.weights) log( '--Assigning vertices to", "if not res or res.nodes[0].type != 'cGeometryDataContainer': res and error( 'Not a GMDC", "Ok set_log_file(f) # # begin import # log( '==Geometry Data Container Importer======' )", "if not transform_tree: close_log_file() display_menu('Error!', ['Could not load resource node file. See log", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of", "= scene.objects.new(mesh) obj.name = 'b_mesh' if geometry.dynamic_bmesh: log( 'Creating dynamic bounding mesh...' )", "log() try: if settings['remove_doubles']: log( 'Removing doubles...' 
) geometry.remove_doubles() log() log( 'Creating objects...'", "str_gmdc_filename, str_cres_filename, btn_import_bmesh, btn_all_bones, btn_remove_doubles, btn_save_log pos_y = 230 ; MAX_PATH = 200", "= [] for group in geometry.index_groups: log( 'Index group \"%s\"' % group.name )", "= map(select_data, data_group.dVerts) log( '\\x20\\x20--Length of dV: (%i, %i, %i, %i)' % tuple(map(len,", "if _save_log: s = gmdc_filename + '.import_log.txt' log( 'Opening log file \"%s\" for", "T2: mesh.addUVLayer('UVMap2') mesh.activeUVLayer = 'UVMap2' for f, t in zip(mesh.faces, T2): f.uv =", "i, s in enumerate(items)), 0x100) b = choice_required and choice < 0 return", "< 0 return choice def draw_gui(): global str_gmdc_filename, str_cres_filename, btn_import_bmesh, btn_all_bones, btn_remove_doubles, btn_save_log", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "set(map(id, transform_tree)) else: node_ids = set() for j in bone_set: node = transform_tree.get_node(j)", "scene.properties[\"gmdc_inverse_transforms\"]' ) scene.properties['gmdc_inverse_transforms'] = v # # add armature (if any) # if", "Unicode here obj.addProperty('flags', '%08X' % group.flags) mesh_objects.append(obj) # save reference to current object", "draw_gui(): global str_gmdc_filename, str_cres_filename, btn_import_bmesh, btn_all_bones, btn_remove_doubles, btn_save_log pos_y = 230 ; MAX_PATH", "val elif evt == Draw.RIGHTCTRLKEY: r_ctrl_key_pressed = val elif evt == Draw.RETKEY and", "the Software without restriction, including without limitation the rights # to use, copy,", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN", "# %i' % i, t, 'removed' ) for i in reversed(w): del I[i]", "person obtaining a copy # of this software and associated documentation files (the", "while node and id(node) not in node_ids: node_ids.add(id(node)) node = node.parent if node_ids:", "= parent_bone name = make_unique_bone_name(node.name, node.bone_index, 
armature.bones.keys()) # add bone and its children", "error(e) display_menu('Error!', ['Could not open log file for writing.']) return # Ok set_log_file(f)", "len(I)) ) # create mesh and add it to the scene mesh =", "100, 20, btn_import_bmesh.val, \"Import bounding geometry\") btn_remove_doubles = Draw.Toggle(\"Rm. doubles\", 0x32, 120, pos_y,", "log( transform_tree ) log() try: if settings['remove_doubles']: log( 'Removing doubles...' ) geometry.remove_doubles() log()", "T1[i] if T2: del T2[i] w = None log( '--Creating mesh object (vertices:", "== 3: f = 1.0 - sum(w) else: f = w[wi] mesh.assignVertsToGroup(v_group_names[j], [i],", "in enumerate(V) if i in S] I = [(S[i], S[j], S[k]) for i,", "<reponame>djalex88/blender-gmdc<gh_stars>1-10 #!BPY \"\"\" Name: 'GMDC (.gmdc, .5gd)' Blender: 249 Group: 'Import' Tooltip: 'Import", "above copyright notice and this permission notice shall be included in # all", "for f, t in zip(mesh.faces, T1): f.uv = tuple(BlenderVector(u, 1-v) for u, v", "log( 'Saving inverse transforms in scene.properties[\"gmdc_inverse_transforms\"]' ) scene.properties['gmdc_inverse_transforms'] = v # # add", "if wi == 3: f = 1.0 - sum(w) else: f = w[wi]", "coords if data_group.tex_coords: T1 = select_data(data_group.tex_coords) T1 = [(T1[i], T1[j], T1[k]) for i,", "'Index group \"%s\"' % group.name ) data_group = geometry.data_groups[group.data_group_index] # define index mapping", "shape keys...' 
) keys = select_data(data_group.keys) dV = map(select_data, data_group.dVerts) log( '\\x20\\x20--Length of", "mesh # mesh = Blender.Mesh.New(name) mesh.verts.extend(V) mesh.faces.extend(I, ignoreDups=True, smooth=True) # since Blender recalculates", "persons to whom the Software is # furnished to do so, subject to", "file browser\") Draw.EndAlign() pos_y-= 30 # resource node file selector Draw.Label(\"Resource node file", "conditions: # # The above copyright notice and this permission notice shall be", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "1 # return to basis #<- groups # # add bounding geometry #", "all bones/transforms; otherwise, used bones only\") btn_save_log = Draw.Toggle(\"Save log\", 0x34, 320, pos_y,", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "key in _keys_f: j = key.index(idx) v = dV[j] if v: block_verts[i]+= BlenderVector(*v[i])", "documentation files (the \"Software\"), to deal # in the Software without restriction, including", "= select_data(data_group.weights) log( '--Assigning vertices to vertex groups...' ) # map bones B", "btn_all_bones.val, } _save_log = bool(btn_save_log.val) gmdc_filename = str_gmdc_filename.val.strip() cres_filename = str_cres_filename.val.strip() if not", "# add vertex group mesh.addVertGroup(name) v_group_names = [dd.get(j) for j in xrange(max(dd)+1)] #", "pos_y, 100, 20, \"Open file browser\") Draw.EndAlign() pos_y-= 35 # options Draw.BeginAlign() btn_import_bmesh", "\"Path to resource node file (CRES; optional, but recommended)\") Draw.PushButton(\"Select file\", 0x21, 320,", "or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS", "except IOError as e: error(e) display_menu('Error!', ['Could not open log file for writing.'])", "= obj.modifiers.append(Blender.Modifier.Types.ARMATURE) modifier[Blender.Modifier.Settings.VGROUPS ] = True # use vertex groups modifier[Blender.Modifier.Settings.ENVELOPES] = False", "close_log_file() display_menu('Error!', ['Could not load geometry file. See log for details.']) return geometry", "notice and this permission notice shall be included in # all copies or", "Draw.BeginAlign() str_gmdc_filename = Draw.String(\"\", 0x10, 20, pos_y, 300, 20, str_gmdc_filename.val, MAX_PATH, \"Path to", "of charge, to any person obtaining a copy # of this software and", "False if not res or res.nodes[0].type != 'cGeometryDataContainer': res and error( 'Not a", "\"Terminate the script (Esc)\") Draw.EndAlign() #--------------------------------------- # event handlers l_ctrl_key_pressed = 0 r_ctrl_key_pressed", "mean v = [_n.abs_transform.loc for _n in node.child_nodes if (id(_n) in node_ids and", "# filtering function def select_data(data): return [x for i, x in enumerate(data) if", ") log( transform_tree ) log() try: if settings['remove_doubles']: log( 'Removing doubles...' ) geometry.remove_doubles()", "and group.tex_coords2[:] # also, Blender does not like triangles with zero-index vertex on", "current object log( '--Rigging:', data_group.bones and 'yes' or 'no' ) # rigging #", "'.%i'%i + idx s = name[:30-len(s)] + s i+= 1 return s #---------------------------------------", "pos_y, 100, 30, \"Terminate the script (Esc)\") Draw.EndAlign() #--------------------------------------- # event handlers l_ctrl_key_pressed", "T1, T2): # create mesh # mesh = Blender.Mesh.New(name) mesh.verts.extend(V) mesh.faces.extend(I, ignoreDups=True, smooth=True)", "if i in S] I = [(S[i], S[j], S[k]) for i, j, k", ") create_objects(geometry, transform_tree, settings) except: print_last_exception() display_menu('Error!', ['An error has occured. 
See log", "pos_y = 230 ; MAX_PATH = 200 # frame Blender.BGL.glColor3f(0.75, 0.75, 0.75) Blender.BGL.glRecti(10,", "= transform_tree.get_node(j) assert not isinstance(node, tuple) # include all nodes down to root", "(if any) # if geometry.inverse_transforms: v = tuple(chain(*chain(*geometry.inverse_transforms))) try: w = tuple(scene.properties['gmdc_inverse_transforms']) log(", "parent_bone: _bone.parent = parent_bone name = make_unique_bone_name(node.name, node.bone_index, armature.bones.keys()) # add bone and", "== Draw.RETKEY and val and (l_ctrl_key_pressed or r_ctrl_key_pressed): begin_import() l_ctrl_key_pressed = 0 r_ctrl_key_pressed", "Blender.BGL.glColor3f(0.8, 0.8, 0.8) Blender.BGL.glRecti(10, pos_y, 430, pos_y+30) Draw.Label(s, 20, pos_y, 400, 30) pos_y-=", "= Draw.String(\"\", 0x20, 20, pos_y, 300, 20, str_cres_filename.val, MAX_PATH, \"Path to resource node", "if v: block_verts[i]+= BlenderVector(*v[i]) obj.activeShape = 1 # return to basis #<- groups", "0, otherwise Blender ignores it if (node.abs_transform.loc-v).len() < 0.025: v = node.abs_transform.loc +", "= load_resource(cres_filename, _save_log and 2 or 1) if res and res.nodes[0].type == 'cResourceNode':", "previously loaded inverse transforms.'], choice_required=True) == 0: raise Exception() except: log( 'Saving inverse", "from Blender.Mathutils import Vector as BlenderVector ######################################## ## Importer ######################################## def create_objects(geometry, transform_tree,", "Draw.Label(\"Resource node file (optional)\", 20, pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign() str_cres_filename =", "included in # all copies or substantial portions of the Software. 
# #", "name block_verts = mesh.key.blocks[-1].data # modify mesh with dV # for i, key", "here obj.addProperty('flags', '%08X' % group.flags) mesh_objects.append(obj) # save reference to current object log(", "# create mesh and add it to the scene mesh = create_mesh(group.name, V,", "uv1, uv2) if T2: uv1, uv2, uv3 = T2[i] T2[i] = (uv3, uv1,", "(uv3, uv1, uv2) if len(set(t)) < 3: w.append(i) log( '--Triangle # %i' %", "\"%s\"...' % gmdc_filename ) try: res = load_resource(gmdc_filename, _save_log and 2 or 1)", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "and its children armature.bones[name] = _bone add_bones_to_armature(node.child_nodes, _bone) ## ## armature, node_ids and", "\"Import bounding geometry\") btn_remove_doubles = Draw.Toggle(\"Rm. doubles\", 0x32, 120, pos_y, 100, 20, btn_remove_doubles.val,", "# Ok log( 'Finished!' ) Blender.Redraw() # exit prompt if display_menu(\"Import complete!\", ['Quit'])", "str_cres_filename.val, MAX_PATH, \"Path to resource node file (CRES; optional, but recommended)\") Draw.PushButton(\"Select file\",", "and associated documentation files (the \"Software\"), to deal # in the Software without", "event_handler(evt, val): global l_ctrl_key_pressed, r_ctrl_key_pressed if evt == Draw.ESCKEY and val: Draw.Exit() elif", "transform_tree = build_transform_tree(res.nodes) else: res and error( 'Not a CRES file!' 
) except:", "i, t in enumerate(I): if 0 == t[2]: I[i] = (t[2], t[0], t[1])", "0 return choice def draw_gui(): global str_gmdc_filename, str_cres_filename, btn_import_bmesh, btn_all_bones, btn_remove_doubles, btn_save_log pos_y", "320, pos_y, 100, 20, \"Open file browser\") Draw.EndAlign() pos_y-= 35 # options Draw.BeginAlign()", "\"\"\" Name: 'GMDC (.gmdc, .5gd)' Blender: 249 Group: 'Import' Tooltip: 'Import TS2 GMDC", "= open(s, 'w') except IOError as e: error(e) display_menu('Error!', ['Could not open log", "True # use vertex groups modifier[Blender.Modifier.Settings.ENVELOPES] = False # not envelopes modifier[Blender.Modifier.Settings.OBJECT ]", "close_log_file() ######################################## ## GUI ######################################## def display_menu(caption, items, choice_required=False): b = True while", "assign vertices for i, (b, w) in enumerate(zip(B, W)): for wi, j in", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #", "= group.tex_coords and group.tex_coords[:] # copy or None T2 = group.tex_coords2 and group.tex_coords2[:]", "t) mesh.activeUVLayer = 'UVMap' return mesh def add_bones_to_armature(transform_nodes, parent_bone=None): for node in transform_nodes:", "node_ids and bone_set are defined at the bottom def make_unique_bone_name(name, idx, collection): idx", "gmdc_filename ) try: res = load_resource(gmdc_filename, _save_log and 2 or 1) except: print_last_exception()", "browser\") Draw.EndAlign() pos_y-= 30 # resource node file selector Draw.Label(\"Resource node file (optional)\",", "% group.flags) mesh_objects.append(obj) # save reference to current object log( '--Rigging:', data_group.bones and", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "keep previously loaded inverse transforms.'], choice_required=True) == 0: raise Exception() except: log( 'Saving", "merged together (removes seams)\") btn_all_bones = Draw.Toggle(\"All bones\", 0x33, 220, pos_y, 100, 20,", "del I[i] if T1: del 
T1[i] if T2: del T2[i] w = None", "# create armature object arm_obj.drawMode |= Blender.Object.DrawModes.XRAY # add bones armature.makeEditable() add_bones_to_armature(transform_tree.root_nodes) armature.update()", "node file (CRES; optional, but recommended)\") Draw.PushButton(\"Select file\", 0x21, 320, pos_y, 100, 20,", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "a CRES file!' ) except: print_last_exception() if not transform_tree: close_log_file() display_menu('Error!', ['Could not", "enumerate(data) if i in S] V = select_data(data_group.vertices) # texture coords if data_group.tex_coords:", "# mesh_objects = [] for group in geometry.index_groups: log( 'Index group \"%s\"' %", "if transform_tree: bone_set = set(chain(*(group.bones or [] for group in geometry.index_groups))) if settings['all_bones']:", "return s #--------------------------------------- # get active scene scene = bpy.data.scenes.active # # add", "'No, keep previously loaded inverse transforms.'], choice_required=True) == 0: raise Exception() except: log(", "evt == 1: begin_import() elif evt == 0x11: Blender.Window.FileSelector(set_gmdc_filename, 'Select') elif evt ==", "idx, s in enumerate(geometry.morph_names): _keys_f = filter(lambda t: idx in t[1], enumerate(keys)) if", "of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "'\\x20\\x20--Key \"%s\"' % s ) obj.insertShapeKey() mesh.key.blocks[-1].name = s # set name block_verts", "log\", 0x34, 320, pos_y, 100, 20, btn_save_log.val, \"Write script's log data into file", "T2[i] = (uv3, uv1, uv2) if len(set(t)) < 3: w.append(i) log( '--Triangle #", "except: print_last_exception() if not transform_tree: close_log_file() display_menu('Error!', ['Could not load resource node file.", "= mesh.key.blocks[-1].data # modify mesh with dV # for i, key in _keys_f:", "log file (if needed) if _save_log: s = gmdc_filename + '.import_log.txt' log( 'Opening", "# use vertex groups modifier[Blender.Modifier.Settings.ENVELOPES] = False # not envelopes modifier[Blender.Modifier.Settings.OBJECT ] =", "event loop str_gmdc_filename = Draw.Create(\"\") str_cres_filename = Draw.Create(\"\") btn_import_bmesh = Draw.Create(0) btn_remove_doubles =", "if T1: del T1[i] if T2: del T2[i] w = None log( '--Creating", "%i reordered:' % i, t, '->', I[i] ) if T1: uv1, uv2, uv3", "dd = None # shape keys # if data_group.keys: log( '--Adding shape keys...'", "T2[j], T2[k]) for i, j, k in I] else: T2 = None else:", "log( '==Geometry Data Container Importer======' ) log( 'GMDC file:', gmdc_filename ) log( 'CRES", "dV)) ) # basis obj.insertShapeKey() for idx, s in enumerate(geometry.morph_names): _keys_f = filter(lambda", "already has inverse transforms (%i) stored in scene.properties[\"gmdc_inverse_transforms\"]' % (len(w)/7) ) if v", "== Draw.ESCKEY and val: Draw.Exit() elif evt == Draw. 
LEFTCTRLKEY: l_ctrl_key_pressed = val", "transform_tree = None if cres_filename: # load skeleton log( 'Opening CRES file \"%s\"...'", "str_gmdc_filename.val.strip() cres_filename = str_cres_filename.val.strip() if not gmdc_filename: display_menu('Error!', ['Select GMDC file.']) return #", "frame Blender.BGL.glColor3f(0.75, 0.75, 0.75) Blender.BGL.glRecti(10, 10, 430, pos_y) pos_y-= 30 # plugin's header", "res and error( 'Not a GMDC file!' ) close_log_file() display_menu('Error!', ['Could not load", "handlers l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0 def set_gmdc_filename(filename): global gmdc_filename str_gmdc_filename.val =", ") log( '--Remove doubles: ', settings['remove_doubles'] ) log( '--Import all bones: ', settings['all_bones']", "new_index } for i, x in enumerate(sorted(set(chain(*group.indices)))): S[x] = i # map indices", "v_group_names = None mesh_objects.append(obj) # # load inverse transforms (if any) # if", "# for obj in mesh_objects: modifier = obj.modifiers.append(Blender.Modifier.Types.ARMATURE) modifier[Blender.Modifier.Settings.VGROUPS ] = True #", "cres_filename: # load skeleton log( 'Opening CRES file \"%s\"...' % cres_filename ) try:", "= sum(v, Vector())*(1./len(v)) if (v and node.bone_index in bone_set) else node.abs_transform.loc # the", "430, pos_y+30) Draw.Label(s, 20, pos_y, 400, 30) pos_y-= 30 # GMDC file selector", "] = arm_obj scene.update() #<- end def begin_import(): settings = { 'import_bmesh': btn_import_bmesh.val,", "0x100) b = choice_required and choice < 0 return choice def draw_gui(): global", "+ \"|\".join('%s%%x%i'%(s, i) for i, s in enumerate(items)), 0x100) b = choice_required and", "(C) 2016 DjAlex88 (https://github.com/djalex88/) # # Permission is hereby granted, free of charge,", "Blender.Mesh.New(name) mesh.verts.extend(V) mesh.faces.extend(I, ignoreDups=True, smooth=True) # since Blender recalculates normals, setting original normals", "in # all copies or substantial portions of the Software. 
# # THE", "i, x in enumerate(sorted(set(chain(*I)))): S[x] = i+j rot, loc = geometry.inverse_transforms[idx] t =", "file:', gmdc_filename ) log( 'CRES file:', cres_filename ) log( 'Settings:' ) log( '--Import", "'Creating objects...' ) create_objects(geometry, transform_tree, settings) except: print_last_exception() display_menu('Error!', ['An error has occured.", "tuple(BlenderVector(u, 1-v) for u, v in t) mesh.activeUVLayer = 'UVMap' return mesh def", "stored in scene.properties[\"gmdc_inverse_transforms\"]' % (len(w)/7) ) if v != w and display_menu('The file", "pos_y-= 20 Draw.BeginAlign() str_gmdc_filename = Draw.String(\"\", 0x10, 20, pos_y, 300, 20, str_gmdc_filename.val, MAX_PATH,", "the scene mesh = create_mesh(group.name, V, I, T1, T2) obj = scene.objects.new(mesh) obj.name", "if geometry.static_bmesh: log( 'Creating static bounding mesh...' ) V, I = geometry.static_bmesh mesh", "mesh.key.blocks[-1].name = s # set name block_verts = mesh.key.blocks[-1].data # modify mesh with", "= Draw.Toggle(\"All bones\", 0x33, 220, pos_y, 100, 20, btn_all_bones.val, \"Import all bones/transforms; otherwise,", "log( 'Finished!' 
) Blender.Redraw() # exit prompt if display_menu(\"Import complete!\", ['Quit']) == 0:", "log( '--Triangle # %i reordered:' % i, t, '->', I[i] ) if T1:", "0x34, 320, pos_y, 100, 20, btn_save_log.val, \"Write script's log data into file *.import_log.txt\")", "320, pos_y, 100, 20, \"Open file browser\") Draw.EndAlign() pos_y-= 30 # resource node", "## Importer ######################################## def create_objects(geometry, transform_tree, settings): #--------------------------------------- # subroutines def create_mesh(name, V,", "[(T2[i], T2[j], T2[k]) for i, j, k in I] else: T2 = None", "data_group = geometry.data_groups[group.data_group_index] # define index mapping S = {} # { old_index", "btn_all_bones, btn_remove_doubles, btn_save_log pos_y = 230 ; MAX_PATH = 200 # frame Blender.BGL.glColor3f(0.75,", "= Draw.Create(\"\") str_cres_filename = Draw.Create(\"\") btn_import_bmesh = Draw.Create(0) btn_remove_doubles = Draw.Create(1) btn_all_bones =", "pos_y, 400, 30) pos_y-= 30 # GMDC file selector Draw.Label(\"GMDC file\", 20, pos_y,", "Draw.Label(\"GMDC file\", 20, pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign() str_gmdc_filename = Draw.String(\"\", 0x10,", "bones\", 0x33, 220, pos_y, 100, 20, btn_all_bones.val, \"Import all bones/transforms; otherwise, used bones", "= 'UVMap' return mesh def add_bones_to_armature(transform_nodes, parent_bone=None): for node in transform_nodes: if id(node)", "create_mesh(name, V, I, T1, T2): # create mesh # mesh = Blender.Mesh.New(name) mesh.verts.extend(V)", "free of charge, to any person obtaining a copy # of this software", "bounding mesh...' 
) mesh = Blender.Mesh.New('b_mesh') obj = scene.objects.new(mesh) obj.name = 'b_mesh' v_group_names", "for i, x in enumerate(sorted(set(chain(*I)))): S[x] = i+j rot, loc = geometry.inverse_transforms[idx] t", ") mesh = Blender.Mesh.New('b_mesh') obj = scene.objects.new(mesh) obj.name = 'b_mesh' v_group_names = set()", "begin_import(): settings = { 'import_bmesh': btn_import_bmesh.val, 'remove_doubles': btn_remove_doubles.val, 'all_bones': btn_all_bones.val, } _save_log =", "old_index -> new_index } for i, x in enumerate(sorted(set(chain(*group.indices)))): S[x] = i #", "0.75) Blender.BGL.glRecti(10, 10, 430, pos_y) pos_y-= 30 # plugin's header s = \"GMDC", "249 Group: 'Import' Tooltip: 'Import TS2 GMDC file' \"\"\" #------------------------------------------------------------------------------- # Copyright (C)", "Draw. LEFTCTRLKEY: l_ctrl_key_pressed = val elif evt == Draw.RIGHTCTRLKEY: r_ctrl_key_pressed = val elif", "= set() for idx, part in enumerate(geometry.dynamic_bmesh): if part: V, I = part", "= None mesh_objects.append(obj) # # load inverse transforms (if any) # if geometry.inverse_transforms:", "I = [(S[i], S[j], S[k]) for i, j, k in I] mesh.verts.extend(V) mesh.faces.extend(I)", "'GMDC file:', gmdc_filename ) log( 'CRES file:', cres_filename ) log( 'Settings:' ) log(", "except: print_last_exception() res = False if not res or res.nodes[0].type != 'cGeometryDataContainer': res", "b: choice = Draw.PupMenu('%s%%t|'%caption + \"|\".join('%s%%x%i'%(s, i) for i, s in enumerate(items)), 0x100)", "######################################## def display_menu(caption, items, choice_required=False): b = True while b: choice = Draw.PupMenu('%s%%t|'%caption", "mesh.faces.extend(I) obj = scene.objects.new(mesh) obj.name = 'b_mesh' if geometry.dynamic_bmesh: log( 'Creating dynamic bounding", ") # assign armature modifier # for obj in mesh_objects: modifier = obj.modifiers.append(Blender.Modifier.Types.ARMATURE)", "# Copyright (C) 2016 DjAlex88 
(https://github.com/djalex88/) # # Permission is hereby granted, free", "in node_ids and _n.bone_index in bone_set)] v = sum(v, Vector())*(1./len(v)) if (v and", "geometry) # mesh_objects = [] for group in geometry.index_groups: log( 'Index group \"%s\"'", "V, I, T1, T2): # create mesh # mesh = Blender.Mesh.New(name) mesh.verts.extend(V) mesh.faces.extend(I,", "import * from itertools import chain import bpy, Blender from Blender import Draw", "settings['all_bones']: node_ids = set(map(id, transform_tree)) else: node_ids = set() for j in bone_set:", "Blender recalculates normals, setting original normals is useless # instead, calculate normals mesh.calcNormals()", "reference to current object log( '--Rigging:', data_group.bones and 'yes' or 'no' ) #", "bounding geometry:', settings['import_bmesh'] ) log( '--Remove doubles: ', settings['remove_doubles'] ) log( '--Import all", "like triangles with zero-index vertex on 3rd position # as well as degenerate", "= Draw.Toggle(\"Bound. mesh\", 0x31, 20, pos_y, 100, 20, btn_import_bmesh.val, \"Import bounding geometry\") btn_remove_doubles", "mesh...' ) mesh = Blender.Mesh.New('b_mesh') obj = scene.objects.new(mesh) obj.name = 'b_mesh' v_group_names =", "i, j, k in I] else: T2 = None else: T1 = group.tex_coords", "log() transform_tree = None if cres_filename: # load skeleton log( 'Opening CRES file", "has occured. See log for details.']) else: # Ok log( 'Finished!' ) Blender.Redraw()", "not isinstance(node, tuple) # include all nodes down to root while node and", "key.index(idx) v = dV[j] if v: block_verts[i]+= BlenderVector(*v[i]) obj.activeShape = 1 # return", "THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN", "parent_bone=None): for node in transform_nodes: if id(node) in node_ids: _bone = Blender.Armature.Editbone() _bone.head", "node.child_nodes if (id(_n) in node_ids and _n.bone_index in bone_set)] v = sum(v, Vector())*(1./len(v))", "200, 20) pos_y-= 20 Draw.BeginAlign() str_cres_filename = Draw.String(\"\", 0x20, 20, pos_y, 300, 20,", "inverse transforms.'], choice_required=True) == 0: raise Exception() except: log( 'Saving inverse transforms in", "= BlenderVector(v.to_tuple()) if parent_bone: _bone.parent = parent_bone name = make_unique_bone_name(node.name, node.bone_index, armature.bones.keys()) #", "map(select_data, data_group.dVerts) log( '\\x20\\x20--Length of dV: (%i, %i, %i, %i)' % tuple(map(len, dV))", "= [(S[i], S[j], S[k]) for i, j, k in I] mesh.verts.extend(V) mesh.faces.extend(I) name", "uv2) if len(set(t)) < 3: w.append(i) log( '--Triangle # %i' % i, t,", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #", "SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "= name[:30-len(s)] + s i+= 1 return s #--------------------------------------- # get active scene", "Blender.BGL.glColor3f(0.75, 0.75, 0.75) Blender.BGL.glRecti(10, 10, 430, pos_y) pos_y-= 30 # plugin's header s", "k in group.indices] # filtering function def select_data(data): return [x for i, x", "geometry.inverse_transforms[idx] t = Transform(loc, rot).get_inverse() V = [t.transformPoint(Vector(*x)).to_tuple() for i, x in enumerate(V)", "Software, and to permit persons to whom the Software is # furnished to", "they are merged together (removes seams)\") btn_all_bones = Draw.Toggle(\"All bones\", 0x33, 220, pos_y,", "(TS2)\" Blender.BGL.glColor3f(0.8, 0.8, 0.8) Blender.BGL.glRecti(10, pos_y, 430, pos_y+30) Draw.Label(s, 20, pos_y, 400, 30)", "if (id(_n) in node_ids and _n.bone_index in bone_set)] v = sum(v, Vector())*(1./len(v)) if", "bounding mesh...' 
) V, I = geometry.static_bmesh mesh = Blender.Mesh.New('b_mesh') mesh.verts.extend(V) mesh.faces.extend(I) obj", "#------------------------------------------------------------------------------- # set default values for gui elements and run event loop str_gmdc_filename", "str_gmdc_filename = Draw.Create(\"\") str_cres_filename = Draw.Create(\"\") btn_import_bmesh = Draw.Create(0) btn_remove_doubles = Draw.Create(1) btn_all_bones", "mesh objects (main geometry) # mesh_objects = [] for group in geometry.index_groups: log(", "'b_mesh' v_group_names = set() for idx, part in enumerate(geometry.dynamic_bmesh): if part: V, I", "Blender ignores it if (node.abs_transform.loc-v).len() < 0.025: v = node.abs_transform.loc + node.abs_transform.rot.get_matrix().col(2)*0.05 _bone.tail", "j in bone_set: node = transform_tree.get_node(j) assert not isinstance(node, tuple) # include all", "T2) obj = scene.objects.new(mesh) obj.name = group.name # max - 21 characters #", "for i, (b, w) in enumerate(zip(B, W)): for wi, j in enumerate(b): if", "group \"%s\"' % group.name ) data_group = geometry.data_groups[group.data_group_index] # define index mapping S", "cres_filename ) try: res = load_resource(cres_filename, _save_log and 2 or 1) if res", "t in zip(mesh.faces, T2): f.uv = tuple(BlenderVector(u, 1-v) for u, v in t)", "= Draw.String(\"\", 0x10, 20, pos_y, 300, 20, str_gmdc_filename.val, MAX_PATH, \"Path to GMDC file\")", "res or res.nodes[0].type != 'cGeometryDataContainer': res and error( 'Not a GMDC file!' 
)", "(uv3, uv1, uv2) if T2: uv1, uv2, uv3 = T2[i] T2[i] = (uv3,", "pos_y, 430, pos_y+30) Draw.Label(s, 20, pos_y, 400, 30) pos_y-= 30 # GMDC file", "scene.objects.new(armature) # create armature object arm_obj.drawMode |= Blender.Object.DrawModes.XRAY # add bones armature.makeEditable() add_bones_to_armature(transform_tree.root_nodes)", "geometry (Ctrl + Enter)\") Draw.PushButton(\"Exit\", 0, 220, pos_y, 100, 30, \"Terminate the script", "display_menu('Error!', ['Select GMDC file.']) return # create log file (if needed) if _save_log:", "x in enumerate(sorted(set(chain(*I)))): S[x] = i+j rot, loc = geometry.inverse_transforms[idx] t = Transform(loc,", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "\"Open file browser\") Draw.EndAlign() pos_y-= 35 # options Draw.BeginAlign() btn_import_bmesh = Draw.Toggle(\"Bound. mesh\",", "to vertex groups...' ) # map bones B = [tuple(group.bones[j] for j in", "sum(w) else: f = w[wi] mesh.assignVertsToGroup(v_group_names[j], [i], f, 1) # 1 - Blender.Mesh.AssignModes.REPLACE", "return [x for i, x in enumerate(data) if i in S] V =", "Draw.PushButton(\"Select file\", 0x11, 320, pos_y, 100, 20, \"Open file browser\") Draw.EndAlign() pos_y-= 30", "scene.objects.new(mesh) obj.name = 'b_mesh' v_group_names = set() for idx, part in enumerate(geometry.dynamic_bmesh): if", "t = Transform(loc, rot).get_inverse() V = [t.transformPoint(Vector(*x)).to_tuple() for i, x in enumerate(V) if", "bones B = [tuple(group.bones[j] for j in b) for b in B] dd", "create_mesh(group.name, V, I, T1, T2) obj = scene.objects.new(mesh) obj.name = group.name # max", "bones only\") btn_save_log = Draw.Toggle(\"Save log\", 0x34, 320, pos_y, 100, 20, btn_save_log.val, \"Write", "if not gmdc_filename: display_menu('Error!', ['Select GMDC file.']) return # create log file (if", "v: block_verts[i]+= BlenderVector(*v[i]) obj.activeShape = 1 # return to basis #<- groups #", "choice_required=True) == 0: raise Exception() except: log( 
'Saving inverse transforms in scene.properties[\"gmdc_inverse_transforms\"]' )", "otherwise, used bones only\") btn_save_log = Draw.Toggle(\"Save log\", 0x34, 320, pos_y, 100, 20,", "0 r_ctrl_key_pressed = 0 def set_gmdc_filename(filename): global gmdc_filename str_gmdc_filename.val = filename def set_cres_filename(filename):", "block_verts[i]+= BlenderVector(*v[i]) obj.activeShape = 1 # return to basis #<- groups # #", "log( '--Adding armature modifier(s)...' ) # assign armature modifier # for obj in", "import chain import bpy, Blender from Blender import Draw from Blender.Mathutils import Vector", "= None else: T1 = group.tex_coords and group.tex_coords[:] # copy or None T2", "[_n.abs_transform.loc for _n in node.child_nodes if (id(_n) in node_ids and _n.bone_index in bone_set)]", "log( 'Removing doubles...' ) geometry.remove_doubles() log() log( 'Creating objects...' ) create_objects(geometry, transform_tree, settings)", "in the Software without restriction, including without limitation the rights # to use,", "= Draw.Toggle(\"Rm. doubles\", 0x32, 120, pos_y, 100, 20, btn_remove_doubles.val, \"If some vertices differ", "i in S] I = [(S[i], S[j], S[k]) for i, j, k in", "i in reversed(w): del I[i] if T1: del T1[i] if T2: del T2[i]", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS", "T1: mesh.addUVLayer('UVMap') # assign texture coords # for f, t in zip(mesh.faces, T1):", "load geometry log( 'Opening GMDC file \"%s\"...' 
% gmdc_filename ) try: res =", "= arm_obj scene.update() #<- end def begin_import(): settings = { 'import_bmesh': btn_import_bmesh.val, 'remove_doubles':", "S = {} # { old_index -> new_index } j = len(mesh.verts) for", "for j in xrange(max(dd)+1)] # assign vertices for i, (b, w) in enumerate(zip(B,", "f.uv = tuple(BlenderVector(u, 1-v) for u, v in t) mesh.activeUVLayer = 'UVMap' return", "in reversed(w): del I[i] if T1: del T1[i] if T2: del T2[i] w", ") # map bones B = [tuple(group.bones[j] for j in b) for b", "objects (main geometry) # mesh_objects = [] for group in geometry.index_groups: log( 'Index", "I[i] ) if T1: uv1, uv2, uv3 = T1[i] T1[i] = (uv3, uv1,", "from itertools import chain import bpy, Blender from Blender import Draw from Blender.Mathutils", "create mesh and add it to the scene mesh = create_mesh(group.name, V, I,", "i, t, 'removed' ) for i in reversed(w): del I[i] if T1: del", "tuple(BlenderVector(u, 1-v) for u, v in t) # Direct3D -> OpenGL if T2:", "not like triangles with zero-index vertex on 3rd position # as well as", "Draw.ESCKEY and val: Draw.Exit() elif evt == Draw. LEFTCTRLKEY: l_ctrl_key_pressed = val elif", "elif evt == Draw. LEFTCTRLKEY: l_ctrl_key_pressed = val elif evt == Draw.RIGHTCTRLKEY: r_ctrl_key_pressed", "options Draw.BeginAlign() btn_import_bmesh = Draw.Toggle(\"Bound. mesh\", 0x31, 20, pos_y, 100, 20, btn_import_bmesh.val, \"Import", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE", "bpy, Blender from Blender import Draw from Blender.Mathutils import Vector as BlenderVector ########################################", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "skeleton log( 'Opening CRES file \"%s\"...' 
% cres_filename ) try: res = load_resource(cres_filename,", "nodes (%i)' % len(node_ids) ) armature = Blender.Armature.New() armature.envelopes = False armature.vertexGroups =", "and/or sell # copies of the Software, and to permit persons to whom", "log() # load geometry log( 'Opening GMDC file \"%s\"...' % gmdc_filename ) try:", "in group.indices] # filtering function def select_data(data): return [x for i, x in", "{ old_index -> new_index } j = len(mesh.verts) for i, x in enumerate(sorted(set(chain(*I)))):", "keys...' ) keys = select_data(data_group.keys) dV = map(select_data, data_group.dVerts) log( '\\x20\\x20--Length of dV:", "node file selector Draw.Label(\"Resource node file (optional)\", 20, pos_y, 200, 20) pos_y-= 20", "add armature (if any) # if transform_tree: bone_set = set(chain(*(group.bones or [] for", "%i, %i)' % tuple(map(len, dV)) ) # basis obj.insertShapeKey() for idx, s in", "T1 = select_data(data_group.tex_coords) T1 = [(T1[i], T1[j], T1[k]) for i, j, k in", "30 # resource node file selector Draw.Label(\"Resource node file (optional)\", 20, pos_y, 200,", "group.indices] # filtering function def select_data(data): return [x for i, x in enumerate(data)", "obj.insertShapeKey() mesh.key.blocks[-1].name = s # set name block_verts = mesh.key.blocks[-1].data # modify mesh", "== Draw.RIGHTCTRLKEY: r_ctrl_key_pressed = val elif evt == Draw.RETKEY and val and (l_ctrl_key_pressed", "return choice def draw_gui(): global str_gmdc_filename, str_cres_filename, btn_import_bmesh, btn_all_bones, btn_remove_doubles, btn_save_log pos_y =", "to the scene mesh = create_mesh(group.name, V, I, T1, T2) obj = scene.objects.new(mesh)", "settings['import_bmesh']: if geometry.static_bmesh: log( 'Creating static bounding mesh...' 
) V, I = geometry.static_bmesh", "TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE", "20, \"Open file browser\") Draw.EndAlign() pos_y-= 30 # resource node file selector Draw.Label(\"Resource", "'--Creating mesh object (vertices: %i, triangles: %i)...' % (len(V), len(I)) ) # create", "vertices to vertex groups...' ) # map bones B = [tuple(group.bones[j] for j", "gmdc_filename str_gmdc_filename.val = filename def set_cres_filename(filename): global cres_filename str_cres_filename.val = filename def event_handler(evt,", "\"%s\"' % group.name ) data_group = geometry.data_groups[group.data_group_index] # define index mapping S =", "del T1[i] if T2: del T2[i] w = None log( '--Creating mesh object", "= False # not envelopes modifier[Blender.Modifier.Settings.OBJECT ] = arm_obj scene.update() #<- end def", "# # The above copyright notice and this permission notice shall be included", "elements and run event loop str_gmdc_filename = Draw.Create(\"\") str_cres_filename = Draw.Create(\"\") btn_import_bmesh =", "10, 430, pos_y) pos_y-= 30 # plugin's header s = \"GMDC Importer (TS2)\"", "\"Software\"), to deal # in the Software without restriction, including without limitation the", "T1: uv1, uv2, uv3 = T1[i] T1[i] = (uv3, uv1, uv2) if T2:", "T2: uv1, uv2, uv3 = T2[i] T2[i] = (uv3, uv1, uv2) if len(set(t))", "coordinates, then they are merged together (removes seams)\") btn_all_bones = Draw.Toggle(\"All bones\", 0x33,", "block_verts = mesh.key.blocks[-1].data # modify mesh with dV # for i, key in", "map indices I = [(S[i], S[j], S[k]) for i, j, k in group.indices]", "'Import TS2 GMDC file' \"\"\" #------------------------------------------------------------------------------- # Copyright (C) 2016 DjAlex88 (https://github.com/djalex88/) #", "def button_events(evt): if evt == 0: Draw.Exit() elif evt == 1: begin_import() elif", "# if settings['import_bmesh']: if geometry.static_bmesh: log( 'Creating static bounding mesh...' 
) V, I", "dd[idx] = name = make_unique_bone_name(name, idx, dd.values()) # add vertex group mesh.addVertGroup(name) v_group_names", "== 'cResourceNode': transform_tree = build_transform_tree(res.nodes) else: res and error( 'Not a CRES file!'", "global gmdc_filename str_gmdc_filename.val = filename def set_cres_filename(filename): global cres_filename str_cres_filename.val = filename def", "[t.transformPoint(Vector(*x)).to_tuple() for i, x in enumerate(V) if i in S] I = [(S[i],", "= geometry.inverse_transforms[idx] t = Transform(loc, rot).get_inverse() V = [t.transformPoint(Vector(*x)).to_tuple() for i, x in", "i) for i, s in enumerate(items)), 0x100) b = choice_required and choice <", "# set default values for gui elements and run event loop str_gmdc_filename =", "data_group.keys: log( '--Adding shape keys...' ) keys = select_data(data_group.keys) dV = map(select_data, data_group.dVerts)", "add_bones_to_armature(transform_tree.root_nodes) armature.update() log( '--Adding armature modifier(s)...' ) # assign armature modifier # for", "different set of inverse transforms. Replace?', ['Yes, replace inverse transforms.', 'No, keep previously", "settings['all_bones'] ) log() # load geometry log( 'Opening GMDC file \"%s\"...' 
% gmdc_filename", "v = dV[j] if v: block_verts[i]+= BlenderVector(*v[i]) obj.activeShape = 1 # return to", "subroutines def create_mesh(name, V, I, T1, T2): # create mesh # mesh =", "IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "for i in reversed(w): del I[i] if T1: del T1[i] if T2: del", ") except: print_last_exception() if not transform_tree: close_log_file() display_menu('Error!', ['Could not load resource node", "distribute, sublicense, and/or sell # copies of the Software, and to permit persons", "0x11: Blender.Window.FileSelector(set_gmdc_filename, 'Select') elif evt == 0x21: Blender.Window.FileSelector(set_cres_filename, 'Select') #------------------------------------------------------------------------------- # set default", "u, v in t) mesh.activeUVLayer = 'UVMap' return mesh def add_bones_to_armature(transform_nodes, parent_bone=None): for", "i, (b, w) in enumerate(zip(B, W)): for wi, j in enumerate(b): if wi", "= dV[j] if v: block_verts[i]+= BlenderVector(*v[i]) obj.activeShape = 1 # return to basis", "or 1) except: print_last_exception() res = False if not res or res.nodes[0].type !=", "= s # set name block_verts = mesh.key.blocks[-1].data # modify mesh with dV", "evt == Draw. LEFTCTRLKEY: l_ctrl_key_pressed = val elif evt == Draw.RIGHTCTRLKEY: r_ctrl_key_pressed =", "bone and its children armature.bones[name] = _bone add_bones_to_armature(node.child_nodes, _bone) ## ## armature, node_ids", "in transform_nodes: if id(node) in node_ids: _bone = Blender.Armature.Editbone() _bone.head = BlenderVector(node.abs_transform.loc.to_tuple()) #", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "Blender.Armature.STICK arm_obj = scene.objects.new(armature) # create armature object arm_obj.drawMode |= Blender.Object.DrawModes.XRAY # add", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "and display_menu('The file has a different set of inverse transforms. 
Replace?', ['Yes, replace", "dd.values()) # add vertex group mesh.addVertGroup(name) v_group_names = [dd.get(j) for j in xrange(max(dd)+1)]", "= Blender.Armature.Editbone() _bone.head = BlenderVector(node.abs_transform.loc.to_tuple()) # compute tail pos as arithmetic mean v", "mesh.faces.extend(I, ignoreDups=True, smooth=True) # since Blender recalculates normals, setting original normals is useless", "instead, calculate normals mesh.calcNormals() if T1: mesh.addUVLayer('UVMap') # assign texture coords # for", "CRES file \"%s\"...' % cres_filename ) try: res = load_resource(cres_filename, _save_log and 2", "Blender.BGL.glRecti(10, 10, 430, pos_y) pos_y-= 30 # plugin's header s = \"GMDC Importer", "200 # frame Blender.BGL.glColor3f(0.75, 0.75, 0.75) Blender.BGL.glRecti(10, 10, 430, pos_y) pos_y-= 30 #", "obj.insertShapeKey() for idx, s in enumerate(geometry.morph_names): _keys_f = filter(lambda t: idx in t[1],", "None log( '--Creating mesh object (vertices: %i, triangles: %i)...' % (len(V), len(I)) )", "'b_mesh' if geometry.dynamic_bmesh: log( 'Creating dynamic bounding mesh...' ) mesh = Blender.Mesh.New('b_mesh') obj", "choice < 0 return choice def draw_gui(): global str_gmdc_filename, str_cres_filename, btn_import_bmesh, btn_all_bones, btn_remove_doubles,", "if evt == Draw.ESCKEY and val: Draw.Exit() elif evt == Draw. LEFTCTRLKEY: l_ctrl_key_pressed", "mesh...' ) V, I = geometry.static_bmesh mesh = Blender.Mesh.New('b_mesh') mesh.verts.extend(V) mesh.faces.extend(I) obj =", "portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "if settings['all_bones']: node_ids = set(map(id, transform_tree)) else: node_ids = set() for j in", "in mesh_objects: modifier = obj.modifiers.append(Blender.Modifier.Types.ARMATURE) modifier[Blender.Modifier.Settings.VGROUPS ] = True # use vertex groups", "do so, subject to the following conditions: # # The above copyright notice", "log( 'Creating objects...' 
) create_objects(geometry, transform_tree, settings) except: print_last_exception() display_menu('Error!', ['An error has", "0: Draw.Exit() elif evt == 1: begin_import() elif evt == 0x11: Blender.Window.FileSelector(set_gmdc_filename, 'Select')", "tuple(scene.properties['gmdc_inverse_transforms']) log( 'Scene already has inverse transforms (%i) stored in scene.properties[\"gmdc_inverse_transforms\"]' % (len(w)/7)", "degenerate triangles (i.e., less than 3 different indices): # https://www.blender.org/api/249PythonDoc/Mesh.MFaceSeq-class.html#extend # w =", ") log( 'Settings:' ) log( '--Import bounding geometry:', settings['import_bmesh'] ) log( '--Remove doubles:", "== 0: Draw.Exit() elif evt == 1: begin_import() elif evt == 0x11: Blender.Window.FileSelector(set_gmdc_filename,", "and transform_tree.get_node(idx).name or 'bone' dd[idx] = name = make_unique_bone_name(name, idx, dd.values()) # add", "does not like triangles with zero-index vertex on 3rd position # as well", "permit persons to whom the Software is # furnished to do so, subject", "else: # Ok log( 'Finished!' 
) Blender.Redraw() # exit prompt if display_menu(\"Import complete!\",", "obj.name = group.name # max - 21 characters # save original name and", "_n.bone_index in bone_set)] v = sum(v, Vector())*(1./len(v)) if (v and node.bone_index in bone_set)", "mapping S = {} # { old_index -> new_index } for i, x", ") keys = select_data(data_group.keys) dV = map(select_data, data_group.dVerts) log( '\\x20\\x20--Length of dV: (%i,", "1 return s #--------------------------------------- # get active scene scene = bpy.data.scenes.active # #", "(i.e., less than 3 different indices): # https://www.blender.org/api/249PythonDoc/Mesh.MFaceSeq-class.html#extend # w = [] for", "cres_filename ) log( 'Settings:' ) log( '--Import bounding geometry:', settings['import_bmesh'] ) log( '--Remove", "= '::'.join(s) log( '\\x20\\x20--Key \"%s\"' % s ) obj.insertShapeKey() mesh.key.blocks[-1].name = s #", "if _keys_f: s = '::'.join(s) log( '\\x20\\x20--Key \"%s\"' % s ) obj.insertShapeKey() mesh.key.blocks[-1].name", "V, I = geometry.static_bmesh mesh = Blender.Mesh.New('b_mesh') mesh.verts.extend(V) mesh.faces.extend(I) obj = scene.objects.new(mesh) obj.name", "geometry.inverse_transforms: v = tuple(chain(*chain(*geometry.inverse_transforms))) try: w = tuple(scene.properties['gmdc_inverse_transforms']) log( 'Scene already has inverse", "LEFTCTRLKEY: l_ctrl_key_pressed = val elif evt == Draw.RIGHTCTRLKEY: r_ctrl_key_pressed = val elif evt", "writing.']) return # Ok set_log_file(f) # # begin import # log( '==Geometry Data", "0x21, 320, pos_y, 100, 20, \"Open file browser\") Draw.EndAlign() pos_y-= 35 # options", "since Blender recalculates normals, setting original normals is useless # instead, calculate normals", "(%i, %i, %i, %i)' % tuple(map(len, dV)) ) # basis obj.insertShapeKey() for idx,", "def set_cres_filename(filename): global cres_filename str_cres_filename.val = filename def event_handler(evt, val): global l_ctrl_key_pressed, r_ctrl_key_pressed", "as e: error(e) display_menu('Error!', 
['Could not open log file for writing.']) return #", "1-v) for u, v in t) # Direct3D -> OpenGL if T2: mesh.addUVLayer('UVMap2')", "= Draw.Toggle(\"Save log\", 0x34, 320, pos_y, 100, 20, btn_save_log.val, \"Write script's log data", "300, 20, str_cres_filename.val, MAX_PATH, \"Path to resource node file (CRES; optional, but recommended)\")", "type(group.name) == str obj.addProperty('name', group.name) # Blender does not like Unicode here obj.addProperty('flags',", "log( '--Assigning vertices to vertex groups...' ) # map bones B = [tuple(group.bones[j]", "% s ) try: f = open(s, 'w') except IOError as e: error(e)", "btn_import_bmesh.val, 'remove_doubles': btn_remove_doubles.val, 'all_bones': btn_all_bones.val, } _save_log = bool(btn_save_log.val) gmdc_filename = str_gmdc_filename.val.strip() cres_filename", "select_data(data): return [x for i, x in enumerate(data) if i in S] V", "log for details.']) else: # Ok log( 'Finished!' ) Blender.Redraw() # exit prompt", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS", "B = select_data(data_group.bones) W = select_data(data_group.weights) log( '--Assigning vertices to vertex groups...' )", "evt == Draw.ESCKEY and val: Draw.Exit() elif evt == Draw. LEFTCTRLKEY: l_ctrl_key_pressed =", "for gui elements and run event loop str_gmdc_filename = Draw.Create(\"\") str_cres_filename = Draw.Create(\"\")", "1 - Blender.Mesh.AssignModes.REPLACE v_group_names = dd = None # shape keys # if", "log( 'Index group \"%s\"' % group.name ) data_group = geometry.data_groups[group.data_group_index] # define index", "vertex group mesh.addVertGroup(name) v_group_names = [dd.get(j) for j in xrange(max(dd)+1)] # assign vertices", "'Creating static bounding mesh...' 
) V, I = geometry.static_bmesh mesh = Blender.Mesh.New('b_mesh') mesh.verts.extend(V)", "i, j, k in group.indices] # filtering function def select_data(data): return [x for", "create_objects(geometry, transform_tree, settings): #--------------------------------------- # subroutines def create_mesh(name, V, I, T1, T2): #", "display_menu('Error!', ['Could not load resource node file. See log for details.']) return log()", "21 characters # save original name and flags assert type(group.name) == str obj.addProperty('name',", "log( 'Creating armature...' ) log( '--Number of transform nodes (%i)' % len(node_ids) )", "res.nodes[0].geometry log() transform_tree = None if cres_filename: # load skeleton log( 'Opening CRES", "node.abs_transform.loc + node.abs_transform.rot.get_matrix().col(2)*0.05 _bone.tail = BlenderVector(v.to_tuple()) if parent_bone: _bone.parent = parent_bone name =", "# { old_index -> new_index } j = len(mesh.verts) for i, x in", "and add it to the scene mesh = create_mesh(group.name, V, I, T1, T2)", "del T2[i] w = None log( '--Creating mesh object (vertices: %i, triangles: %i)...'", ") log( '--Import bounding geometry:', settings['import_bmesh'] ) log( '--Remove doubles: ', settings['remove_doubles'] )", "filename def event_handler(evt, val): global l_ctrl_key_pressed, r_ctrl_key_pressed if evt == Draw.ESCKEY and val:", "val and (l_ctrl_key_pressed or r_ctrl_key_pressed): begin_import() l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0 def", "select_data(data_group.tex_coords) T1 = [(T1[i], T1[j], T1[k]) for i, j, k in I] if", "(b, w) in enumerate(zip(B, W)): for wi, j in enumerate(b): if wi ==", "BlenderVector(*v[i]) obj.activeShape = 1 # return to basis #<- groups # # add", "evt == Draw.RETKEY and val and (l_ctrl_key_pressed or r_ctrl_key_pressed): begin_import() l_ctrl_key_pressed = 0", "pos_y, 100, 20, btn_remove_doubles.val, \"If some vertices differ only in texture coordinates, then", "file \"%s\"...' 
% gmdc_filename ) try: res = load_resource(gmdc_filename, _save_log and 2 or", "geometry.data_groups[group.data_group_index] # define index mapping S = {} # { old_index -> new_index", "close_log_file() display_menu('Error!', ['Could not load resource node file. See log for details.']) return", "% gmdc_filename ) try: res = load_resource(gmdc_filename, _save_log and 2 or 1) except:", "220, pos_y, 100, 30, \"Terminate the script (Esc)\") Draw.EndAlign() #--------------------------------------- # event handlers", "pos_y) pos_y-= 30 # plugin's header s = \"GMDC Importer (TS2)\" Blender.BGL.glColor3f(0.8, 0.8,", "\"Open file browser\") Draw.EndAlign() pos_y-= 30 # resource node file selector Draw.Label(\"Resource node", "tuple) # include all nodes down to root while node and id(node) not", "log file for writing.']) return # Ok set_log_file(f) # # begin import #", "if idx!=None else '' s = name[:30-len(idx)] + idx # max - 31", "Draw.EndAlign() #--------------------------------------- # event handlers l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0 def set_gmdc_filename(filename):", "- 31 characters (?) 
i = 1 while s in collection: s =", "in enumerate(sorted(set(chain(*group.indices)))): S[x] = i # map indices I = [(S[i], S[j], S[k])", "not open log file for writing.']) return # Ok set_log_file(f) # # begin", "= choice_required and choice < 0 return choice def draw_gui(): global str_gmdc_filename, str_cres_filename,", "details.']) return geometry = res.nodes[0].geometry log() transform_tree = None if cres_filename: # load", "'--Rigging:', data_group.bones and 'yes' or 'no' ) # rigging # if data_group.bones: B", "selector Draw.Label(\"Resource node file (optional)\", 20, pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign() str_cres_filename", "mesh_objects = [] for group in geometry.index_groups: log( 'Index group \"%s\"' % group.name", "[(S[i], S[j], S[k]) for i, j, k in group.indices] # filtering function def", "some vertices differ only in texture coordinates, then they are merged together (removes", "while b: choice = Draw.PupMenu('%s%%t|'%caption + \"|\".join('%s%%x%i'%(s, i) for i, s in enumerate(items)),", "node.parent if node_ids: log( 'Creating armature...' ) log( '--Number of transform nodes (%i)'", "if T2: uv1, uv2, uv3 = T2[i] T2[i] = (uv3, uv1, uv2) if", "(len(V), len(I)) ) # create mesh and add it to the scene mesh", "node_ids = set(map(id, transform_tree)) else: node_ids = set() for j in bone_set: node", "str_cres_filename = Draw.String(\"\", 0x20, 20, pos_y, 300, 20, str_cres_filename.val, MAX_PATH, \"Path to resource", "log( '\\x20\\x20--Length of dV: (%i, %i, %i, %i)' % tuple(map(len, dV)) ) #", "data into file *.import_log.txt\") Draw.EndAlign() pos_y-= 45 # buttons Draw.BeginAlign() Draw.PushButton(\"Import\", 1, 120,", "mesh.assignVertsToGroup(v_group_names[j], [i], f, 1) # 1 - Blender.Mesh.AssignModes.REPLACE v_group_names = dd = None", "max - 31 characters (?) i = 1 while s in collection: s", "} for i, x in enumerate(sorted(set(chain(*group.indices)))): S[x] = i # map indices I", "(?) 
i = 1 while s in collection: s = '.%i'%i + idx", "# load skeleton log( 'Opening CRES file \"%s\"...' % cres_filename ) try: res", "file has a different set of inverse transforms. Replace?', ['Yes, replace inverse transforms.',", "# resource node file selector Draw.Label(\"Resource node file (optional)\", 20, pos_y, 200, 20)", "node_ids: _bone = Blender.Armature.Editbone() _bone.head = BlenderVector(node.abs_transform.loc.to_tuple()) # compute tail pos as arithmetic", "res = load_resource(cres_filename, _save_log and 2 or 1) if res and res.nodes[0].type ==", "mesh.activeUVLayer = 'UVMap2' for f, t in zip(mesh.faces, T2): f.uv = tuple(BlenderVector(u, 1-v)", "(Esc)\") Draw.EndAlign() #--------------------------------------- # event handlers l_ctrl_key_pressed = 0 r_ctrl_key_pressed = 0 def", "node_ids: node_ids.add(id(node)) node = node.parent if node_ids: log( 'Creating armature...' ) log( '--Number", "def make_unique_bone_name(name, idx, collection): idx = '#%i'%idx if idx!=None else '' s =", "Draw.RETKEY and val and (l_ctrl_key_pressed or r_ctrl_key_pressed): begin_import() l_ctrl_key_pressed = 0 r_ctrl_key_pressed =", "s in collection: s = '.%i'%i + idx s = name[:30-len(s)] + s", "hereby granted, free of charge, to any person obtaining a copy # of", "if data_group.tex_coords: T1 = select_data(data_group.tex_coords) T1 = [(T1[i], T1[j], T1[k]) for i, j,", "= select_data(data_group.keys) dV = map(select_data, data_group.dVerts) log( '\\x20\\x20--Length of dV: (%i, %i, %i,", "groups modifier[Blender.Modifier.Settings.ENVELOPES] = False # not envelopes modifier[Blender.Modifier.Settings.OBJECT ] = arm_obj scene.update() #<-", "set(chain(*(group.bones or [] for group in geometry.index_groups))) if settings['all_bones']: node_ids = set(map(id, transform_tree))", "# THE SOFTWARE. 
#------------------------------------------------------------------------------- from gmdc_tools import * from itertools import chain import", "s ) try: f = open(s, 'w') except IOError as e: error(e) display_menu('Error!',", "T2): f.uv = tuple(BlenderVector(u, 1-v) for u, v in t) mesh.activeUVLayer = 'UVMap'", "\"If some vertices differ only in texture coordinates, then they are merged together", "file selector Draw.Label(\"Resource node file (optional)\", 20, pos_y, 200, 20) pos_y-= 20 Draw.BeginAlign()", "mesh.verts.extend(V) mesh.faces.extend(I) obj = scene.objects.new(mesh) obj.name = 'b_mesh' if geometry.dynamic_bmesh: log( 'Creating dynamic", "zip(mesh.faces, T1): f.uv = tuple(BlenderVector(u, 1-v) for u, v in t) # Direct3D", "obj.activeShape = 1 # return to basis #<- groups # # add bounding" ]
[ "return layers.Conv2D(filters=config['numFilters'], kernel_size=(config['convSize'], config['convSize']), activation=config[\"convActivation\"], padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) def conv2d_last(x, config):", "TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE", "BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED", ":, start:end] # (B, H, W, 3) zb = candidates[:, :, :, i", "yi = candidates[:, :, :, lastCandidIdx:lastCandidIdx + 3] # (B, H, W, 3)", "AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", ":, :, config['ITERATION_POS']:config['ITERATION_POS'] + 1] albedo = input[:, :, :, config['ALBEDO_POS']:config['ALBEDO_POS'] + 3]", "strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) # Constant to make initial radius to be 1", "= tf.concat([albedo, normal, depth, var, candidates], axis=3) # x: (B, H, W, numOutput)", "OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE", "i in range(8): # x: (Batch, H, W, 100) x = conv2d(x, config)", "candidates[:, :, :, lastCandidIdx:lastCandidIdx + 3] # (B, H, W, 3) output =", "# x: (B, H, W, numOutput) x = ConvolutionNet(config, x) # (B, H,", "following conditions are met: 1. 
Redistributions of source code must retain the above", "filters=config['numOutput'], kernel_size=(config['convSize'], config['convSize']), padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) # Constant to make initial", ":, :, config['CANDIDATE_POS'] :config['CANDIDATE_POS'] + 3 * config['numCandidates']] # x: (B, H, W,", "SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY", "1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) def conv2d_last(x, config): return layers.Conv2D( filters=config['numOutput'], kernel_size=(config['convSize'], config['convSize']), padding='same', strides=(1,", "tensorflow as tf from tensorflow.keras import layers, activations from tensorflow.keras.initializers import GlorotUniform, zeros", "# (B, H, W, 3) denoised = weighted_average(yi - zb, wb) output +=", "layers, activations from tensorflow.keras.initializers import GlorotUniform, zeros _module = tf.load_op_library('./_weightaverage_ops.so') @tf.RegisterGradient(\"WeightedAverage\") def _weighted_average_grad(op,", "kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) # Constant to make initial radius to be 1 def ConvolutionNet(config,", "3 * (config['numCandidates'] - 1) # (B, H, W, 3): the candidate with", "= input[:, :, :, config['CANDIDATE_POS'] :config['CANDIDATE_POS'] + 3 * config['numCandidates']] # x: (B,", "numInputChannels) # x: (Batch, H, W, 100) x = conv2d(x, config) for i", "copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in", "COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,", "= denoisingWeights[:, :, :, start:end] # (B, H, W, 3) zb = candidates[:,", "3) denoised = weighted_average(yi - zb, wb) output += denoised # (B, H,", "input[:, :, :, config['VARIANCE_POS']:config['VARIANCE_POS'] + config['numCandidates']] candidates = input[:, :, :, config['CANDIDATE_POS'] :config['CANDIDATE_POS']", "= i * config['kernelArea'] end = (i + 1) * config['kernelArea'] # (B,", "1] albedo = input[:, :, :, config['ALBEDO_POS']:config['ALBEDO_POS'] + 3] normal = input[:, :,", "op.inputs[0] weights = op.inputs[1] grads = _module.weighted_average_grad(grad, image, weights) grads = tf.clip_by_value(grads, -1000000,", "(Batch, H, W, numOutput) x = conv2d_last(x, config) return x def MainNet(config, input):", "All rights reserved. Redistribution and use in source and binary forms, with or", "EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" import copy import", "def MainNet(config, input): # input: (B, H, W, numChannels) N = input[:, :,", "H, W, 3) output = tf.zeros_like(albedo) for i in range(config['numCandidates'] - 1): start", "zeros _module = tf.load_op_library('./_weightaverage_ops.so') @tf.RegisterGradient(\"WeightedAverage\") def _weighted_average_grad(op, grad): image = op.inputs[0] weights =", "3] normal = input[:, :, :, config['NORMAL_POS']:config['NORMAL_POS'] + 3] depth = input[:, :,", "LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON", "conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the", "* config['kernelArea'] # (B, H, W, kernelArea) wb = denoisingWeights[:, :, :, start:end]", "input): # input: (B, H, W, numChannels) N = input[:, :, :, config['ITERATION_POS']:config['ITERATION_POS']", "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR", "output += denoised # (B, H, W, 1) sumWeights = tf.reduce_sum(wb, axis=-1, keepdims=True)", "1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) # Constant to make initial radius to be 1 def", "i in range(config['numCandidates'] - 1): start = i * config['kernelArea'] end = (i", "met: 1. Redistributions of source code must retain the above copyright notice, this", "depth, var, candidates], axis=3) # x: (B, H, W, numOutput) x = ConvolutionNet(config,", "weighted_average(yi - zb, wb) output += denoised # (B, H, W, 1) sumWeights", "CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;", ":, :, i * 3:i * 3 + 3] # (B, H, W,", "op.inputs[1] grads = _module.weighted_average_grad(grad, image, weights) grads = tf.clip_by_value(grads, -1000000, 1000000) return [None,", "# (B, H, W, 3): the candidate with least radius yi = candidates[:,", "must reproduce the above copyright notice, this list of conditions and the following", "PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER", "tf.reduce_sum(denoisingWeights, axis=-1, keepdims=True) lastCandidIdx = 3 * (config['numCandidates'] - 1) # (B, H,", "- zb, wb) output += denoised # (B, H, W, 1) sumWeights =", "THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,", "modification, are permitted provided that the following conditions are met: 1. 
Redistributions of", "candidates], axis=3) # x: (B, H, W, numOutput) x = ConvolutionNet(config, x) #", "100) x = conv2d(x, config) for i in range(8): # x: (Batch, H,", "padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) def conv2d_last(x, config): return layers.Conv2D( filters=config['numOutput'], kernel_size=(config['convSize'], config['convSize']),", "H, W, 1) sumWeights = tf.reduce_sum(wb, axis=-1, keepdims=True) output += zb * sumWeights", "GlorotUniform, zeros _module = tf.load_op_library('./_weightaverage_ops.so') @tf.RegisterGradient(\"WeightedAverage\") def _weighted_average_grad(op, grad): image = op.inputs[0] weights", "+ 3 * config['numCandidates']] # x: (B, H, W, numInputChannels) x = tf.concat([albedo,", "layers.Conv2D(filters=config['numFilters'], kernel_size=(config['convSize'], config['convSize']), activation=config[\"convActivation\"], padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) def conv2d_last(x, config): return", "permitted provided that the following conditions are met: 1. Redistributions of source code", "PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR", "W, numInputChannels) # x: (Batch, H, W, 100) x = conv2d(x, config) for", "= input[:, :, :, config['VARIANCE_POS']:config['VARIANCE_POS'] + config['numCandidates']] candidates = input[:, :, :, config['CANDIDATE_POS']", "distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"", "tf.load_op_library('./_weightaverage_ops.so') @tf.RegisterGradient(\"WeightedAverage\") def _weighted_average_grad(op, grad): image = op.inputs[0] weights = op.inputs[1] grads =", "x: (B, H, W, numOutput) x = ConvolutionNet(config, x) # (B, H, W,", "CGLAB All rights reserved. 
Redistribution and use in source and binary forms, with", "- tf.reduce_max(z) # [0, 1] w = activations.softmax(w) return w def conv2d(x, config):", "GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER", "3] # (B, H, W, 3) output = tf.zeros_like(albedo) for i in range(config['numCandidates']", "materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS", "in source and binary forms, with or without modification, are permitted provided that", "THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,", "range(config['numCandidates'] - 1): start = i * config['kernelArea'] end = (i + 1)", "keepdims=True) lastCandidIdx = 3 * (config['numCandidates'] - 1) # (B, H, W, 3):", "or without modification, are permitted provided that the following conditions are met: 1.", "import copy import tensorflow as tf from tensorflow.keras import layers, activations from tensorflow.keras.initializers", "the above copyright notice, this list of conditions and the following disclaimer in", "CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED", "= ConvolutionNet(config, x) # (B, H, W, kernelArea * (numCandidates-1)) denoisingWeights = activations.relu(x)", "ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED", "input[:, :, :, config['DEPTH_POS']:config['DEPTH_POS'] + 1] var = input[:, :, :, config['VARIANCE_POS']:config['VARIANCE_POS'] +", "+ config['numCandidates']] candidates = input[:, :, :, config['CANDIDATE_POS'] :config['CANDIDATE_POS'] + 3 * config['numCandidates']]", "list of conditions and the following disclaimer. 2. 
Redistributions in binary form must", "(numCandidates-1)) denoisingWeights = activations.relu(x) + 1e-4 # to prevent all zero denoisingWeights =", ":, lastCandidIdx:lastCandidIdx + 3] # (B, H, W, 3) output = tf.zeros_like(albedo) for", "this list of conditions and the following disclaimer. 2. Redistributions in binary form", "wb = denoisingWeights[:, :, :, start:end] # (B, H, W, 3) zb =", "grads = tf.clip_by_value(grads, -1000000, 1000000) return [None, grads] weighted_average = _module.weighted_average def kernelPredictingWeights(z):", "return [None, grads] weighted_average = _module.weighted_average def kernelPredictingWeights(z): # z: (B, H, W,", "from tensorflow.keras.initializers import GlorotUniform, zeros _module = tf.load_op_library('./_weightaverage_ops.so') @tf.RegisterGradient(\"WeightedAverage\") def _weighted_average_grad(op, grad): image", "config['ALBEDO_POS']:config['ALBEDO_POS'] + 3] normal = input[:, :, :, config['NORMAL_POS']:config['NORMAL_POS'] + 3] depth =", "HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,", "and the following disclaimer in the documentation and/or other materials provided with the", "padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) # Constant to make initial radius to be", "OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" import copy import tensorflow as tf", "# x: (Batch, H, W, 100) x = conv2d(x, config) for i in", "1) sumWeights = tf.reduce_sum(wb, axis=-1, keepdims=True) output += zb * sumWeights return output,", "config['numCandidates']] candidates = input[:, :, :, config['CANDIDATE_POS'] :config['CANDIDATE_POS'] + 3 * config['numCandidates']] #", "the following disclaimer in the documentation and/or other materials provided with the distribution.", "2021, CGLAB All rights reserved. 
Redistribution and use in source and binary forms,", "notice, this list of conditions and the following disclaimer in the documentation and/or", "activations from tensorflow.keras.initializers import GlorotUniform, zeros _module = tf.load_op_library('./_weightaverage_ops.so') @tf.RegisterGradient(\"WeightedAverage\") def _weighted_average_grad(op, grad):", "COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,", "activations.softmax(w) return w def conv2d(x, config): return layers.Conv2D(filters=config['numFilters'], kernel_size=(config['convSize'], config['convSize']), activation=config[\"convActivation\"], padding='same', strides=(1,", "W, 3) denoised = weighted_average(yi - zb, wb) output += denoised # (B,", "1): start = i * config['kernelArea'] end = (i + 1) * config['kernelArea']", "POSSIBILITY OF SUCH DAMAGE. \"\"\" import copy import tensorflow as tf from tensorflow.keras", "1) * config['kernelArea'] # (B, H, W, kernelArea) wb = denoisingWeights[:, :, :,", "conv2d(x, config) for i in range(8): # x: (Batch, H, W, 100) x", "Redistributions of source code must retain the above copyright notice, this list of", "= (i + 1) * config['kernelArea'] # (B, H, W, kernelArea) wb =", "with or without modification, are permitted provided that the following conditions are met:", "following disclaimer in the documentation and/or other materials provided with the distribution. THIS", "H, W, kernelArea) # [-inf, 0], for numerical stability w = z -", "reproduce the above copyright notice, this list of conditions and the following disclaimer", "of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce", "(B, H, W, numOutput) x = ConvolutionNet(config, x) # (B, H, W, kernelArea", "albedo = input[:, :, :, config['ALBEDO_POS']:config['ALBEDO_POS'] + 3] normal = input[:, :, :,", "BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,", "= op.inputs[1] grads = _module.weighted_average_grad(grad, image, weights) grads = tf.clip_by_value(grads, -1000000, 1000000) return", "input[:, :, :, config['ITERATION_POS']:config['ITERATION_POS'] + 1] albedo = input[:, :, :, config['ALBEDO_POS']:config['ALBEDO_POS'] +", "= conv2d_last(x, config) return x def MainNet(config, input): # input: (B, H, W,", "Copyright (c) 2021, CGLAB All rights reserved. Redistribution and use in source and", "Redistribution and use in source and binary forms, with or without modification, are", "x) # (B, H, W, kernelArea * (numCandidates-1)) denoisingWeights = activations.relu(x) + 1e-4", "(B, H, W, numChannels) N = input[:, :, :, config['ITERATION_POS']:config['ITERATION_POS'] + 1] albedo", "DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR", "PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR", "USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY", "* config['kernelArea'] end = (i + 1) * config['kernelArea'] # (B, H, W,", "weights) grads = tf.clip_by_value(grads, -1000000, 1000000) return [None, grads] weighted_average = _module.weighted_average def", "FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT", "binary forms, with or without modification, are permitted provided that the following conditions", "H, W, 3) denoised = weighted_average(yi - zb, wb) output += denoised #", "H, W, 100) x = conv2d(x, config) for i in range(8): # x:", "TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;", "OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS", "LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING", "# z: (B, H, W, kernelArea) # [-inf, 0], for numerical stability w", "var, candidates], axis=3) # x: (B, H, W, numOutput) x = ConvolutionNet(config, x)", "denoisingWeights = activations.relu(x) + 1e-4 # to prevent all zero denoisingWeights = denoisingWeights", "= _module.weighted_average_grad(grad, image, weights) grads = tf.clip_by_value(grads, -1000000, 1000000) return [None, grads] weighted_average", "normal, depth, var, candidates], axis=3) # x: (B, H, W, numOutput) x =", "(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF", "bias_initializer=zeros())(x) def conv2d_last(x, config): return layers.Conv2D( filters=config['numOutput'], kernel_size=(config['convSize'], config['convSize']), padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(),", "H, W, numInputChannels) x = tf.concat([albedo, normal, depth, var, candidates], axis=3) # x:", "IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND", "normal = input[:, :, :, config['NORMAL_POS']:config['NORMAL_POS'] + 3] depth = input[:, :, :,", "config['DEPTH_POS']:config['DEPTH_POS'] + 1] var = input[:, :, :, config['VARIANCE_POS']:config['VARIANCE_POS'] + config['numCandidates']] candidates =", "kernelArea) wb = denoisingWeights[:, :, :, start:end] # (B, H, W, 3) zb", "config) return x def MainNet(config, input): # input: (B, H, W, numChannels) N", "ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES", "conv2d(x, config): return layers.Conv2D(filters=config['numFilters'], kernel_size=(config['convSize'], config['convSize']), activation=config[\"convActivation\"], padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) def", "x: (B, H, W, numInputChannels) # x: (Batch, H, W, 100) x =", "for numerical stability w = z - tf.reduce_max(z) # [0, 1] w =", "(B, H, W, numInputChannels) # x: (Batch, H, W, 100) x = conv2d(x,", "conditions and the following disclaimer in the documentation and/or other materials provided with", "= conv2d(x, config) # x: (Batch, H, W, numOutput) x = conv2d_last(x, config)", "least radius yi = candidates[:, :, :, lastCandidIdx:lastCandidIdx + 3] # (B, H,", "+ 3] # (B, H, W, 3) denoised = weighted_average(yi - zb, wb)", "(B, H, W, 3) denoised = weighted_average(yi - zb, wb) output += denoised", "make initial radius to be 1 def ConvolutionNet(config, x): # x: (B, H,", "LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT", "def kernelPredictingWeights(z): # z: (B, H, W, kernelArea) # [-inf, 0], for numerical", "list of conditions and the following disclaimer in the documentation and/or other materials", "3 * config['numCandidates']] # x: (B, H, W, numInputChannels) x = tf.concat([albedo, normal,", "config['CANDIDATE_POS'] :config['CANDIDATE_POS'] + 3 * config['numCandidates']] # x: (B, H, W, numInputChannels) 
x", "above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions", "1000000) return [None, grads] weighted_average = _module.weighted_average def kernelPredictingWeights(z): # z: (B, H,", "OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN", "H, W, 3) zb = candidates[:, :, :, i * 3:i * 3", "var = input[:, :, :, config['VARIANCE_POS']:config['VARIANCE_POS'] + config['numCandidates']] candidates = input[:, :, :,", "config) # x: (Batch, H, W, numOutput) x = conv2d_last(x, config) return x", "x = ConvolutionNet(config, x) # (B, H, W, kernelArea * (numCandidates-1)) denoisingWeights =", "W, kernelArea * (numCandidates-1)) denoisingWeights = activations.relu(x) + 1e-4 # to prevent all", ":, config['CANDIDATE_POS'] :config['CANDIDATE_POS'] + 3 * config['numCandidates']] # x: (B, H, W, numInputChannels)", "NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,", "W, kernelArea) wb = denoisingWeights[:, :, :, start:end] # (B, H, W, 3)", "OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,", "+ 1] var = input[:, :, :, config['VARIANCE_POS']:config['VARIANCE_POS'] + config['numCandidates']] candidates = input[:,", "for i in range(8): # x: (Batch, H, W, 100) x = conv2d(x,", "# [0, 1] w = activations.softmax(w) return w def conv2d(x, config): return layers.Conv2D(filters=config['numFilters'],", "numOutput) x = conv2d_last(x, config) return x def MainNet(config, input): # input: (B,", "bias_initializer=zeros())(x) # Constant to make initial radius to be 1 def ConvolutionNet(config, x):", "@tf.RegisterGradient(\"WeightedAverage\") def _weighted_average_grad(op, grad): image = op.inputs[0] weights = op.inputs[1] grads = _module.weighted_average_grad(grad,", "start:end] # (B, H, W, 3) zb = candidates[:, :, :, i *", "[0, 1] w = activations.softmax(w) return w def conv2d(x, config): return layers.Conv2D(filters=config['numFilters'], kernel_size=(config['convSize'],", "= input[:, :, :, 
config['NORMAL_POS']:config['NORMAL_POS'] + 3] depth = input[:, :, :, config['DEPTH_POS']:config['DEPTH_POS']", "wb) output += denoised # (B, H, W, 1) sumWeights = tf.reduce_sum(wb, axis=-1,", "the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS", "config['convSize']), padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) # Constant to make initial radius to", "= conv2d(x, config) for i in range(8): # x: (Batch, H, W, 100)", "i * 3:i * 3 + 3] # (B, H, W, 3) denoised", "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF", "1e-4 # to prevent all zero denoisingWeights = denoisingWeights / tf.reduce_sum(denoisingWeights, axis=-1, keepdims=True)", "start = i * config['kernelArea'] end = (i + 1) * config['kernelArea'] #", "def conv2d_last(x, config): return layers.Conv2D( filters=config['numOutput'], kernel_size=(config['convSize'], config['convSize']), padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x)", "OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY", "THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH", "weights = op.inputs[1] grads = _module.weighted_average_grad(grad, image, weights) grads = tf.clip_by_value(grads, -1000000, 1000000)", "with least radius yi = candidates[:, :, :, lastCandidIdx:lastCandidIdx + 3] # (B,", "_module = tf.load_op_library('./_weightaverage_ops.so') @tf.RegisterGradient(\"WeightedAverage\") def _weighted_average_grad(op, grad): image = op.inputs[0] weights = op.inputs[1]", "2. 
Redistributions in binary form must reproduce the above copyright notice, this list", "OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY", "INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT", "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE", "tf.reduce_max(z) # [0, 1] w = activations.softmax(w) return w def conv2d(x, config): return", "following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice,", "3] # (B, H, W, 3) denoised = weighted_average(yi - zb, wb) output", "the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED", "import layers, activations from tensorflow.keras.initializers import GlorotUniform, zeros _module = tf.load_op_library('./_weightaverage_ops.so') @tf.RegisterGradient(\"WeightedAverage\") def", "documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY", "activation=config[\"convActivation\"], padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) def conv2d_last(x, config): return layers.Conv2D( filters=config['numOutput'], kernel_size=(config['convSize'],", "# input: (B, H, W, numChannels) N = input[:, :, :, config['ITERATION_POS']:config['ITERATION_POS'] +", "kernel_size=(config['convSize'], config['convSize']), padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) # Constant to make initial radius", "NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR", "W, numOutput) x = ConvolutionNet(config, x) # (B, H, W, kernelArea * (numCandidates-1))", "must retain the above copyright notice, this list of conditions and the following", "STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT", "HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
SPECIAL, EXEMPLARY, OR", "notice, this list of conditions and the following disclaimer. 2. Redistributions in binary", "= tf.zeros_like(albedo) for i in range(config['numCandidates'] - 1): start = i * config['kernelArea']", "the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright", "layers.Conv2D( filters=config['numOutput'], kernel_size=(config['convSize'], config['convSize']), padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) # Constant to make", "from tensorflow.keras import layers, activations from tensorflow.keras.initializers import GlorotUniform, zeros _module = tf.load_op_library('./_weightaverage_ops.so')", "axis=-1, keepdims=True) lastCandidIdx = 3 * (config['numCandidates'] - 1) # (B, H, W,", "PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS", "[-inf, 0], for numerical stability w = z - tf.reduce_max(z) # [0, 1]", "tf.clip_by_value(grads, -1000000, 1000000) return [None, grads] weighted_average = _module.weighted_average def kernelPredictingWeights(z): # z:", "grads = _module.weighted_average_grad(grad, image, weights) grads = tf.clip_by_value(grads, -1000000, 1000000) return [None, grads]", "forms, with or without modification, are permitted provided that the following conditions are", "i * config['kernelArea'] end = (i + 1) * config['kernelArea'] # (B, H,", "+ 1) * config['kernelArea'] # (B, H, W, kernelArea) wb = denoisingWeights[:, :,", "IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE", "zb = candidates[:, :, :, i * 3:i * 3 + 3] #", "candidate with least radius yi = candidates[:, :, :, lastCandidIdx:lastCandidIdx + 3] #", "x = conv2d_last(x, config) return x def MainNet(config, input): # input: (B, H,", "range(8): # x: (Batch, H, W, 100) x = conv2d(x, config) # x:", "radius yi = candidates[:, :, :, lastCandidIdx:lastCandidIdx + 3] # (B, H, W,", "* 3 + 3] # (B, H, W, 3) denoised = 
weighted_average(yi -", "kernelPredictingWeights(z): # z: (B, H, W, kernelArea) # [-inf, 0], for numerical stability", "that the following conditions are met: 1. Redistributions of source code must retain", ":, :, lastCandidIdx:lastCandidIdx + 3] # (B, H, W, 3) output = tf.zeros_like(albedo)", "stability w = z - tf.reduce_max(z) # [0, 1] w = activations.softmax(w) return", "= weighted_average(yi - zb, wb) output += denoised # (B, H, W, 1)", "0], for numerical stability w = z - tf.reduce_max(z) # [0, 1] w", "(B, H, W, kernelArea) # [-inf, 0], for numerical stability w = z", "THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL", "radius to be 1 def ConvolutionNet(config, x): # x: (B, H, W, numInputChannels)", "tf.concat([albedo, normal, depth, var, candidates], axis=3) # x: (B, H, W, numOutput) x", "sumWeights = tf.reduce_sum(wb, axis=-1, keepdims=True) output += zb * sumWeights return output, denoisingWeights", "of source code must retain the above copyright notice, this list of conditions", "INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF", "2-Clause License Copyright (c) 2021, CGLAB All rights reserved. 
Redistribution and use in", "* (config['numCandidates'] - 1) # (B, H, W, 3): the candidate with least", "_weighted_average_grad(op, grad): image = op.inputs[0] weights = op.inputs[1] grads = _module.weighted_average_grad(grad, image, weights)", "w = activations.softmax(w) return w def conv2d(x, config): return layers.Conv2D(filters=config['numFilters'], kernel_size=(config['convSize'], config['convSize']), activation=config[\"convActivation\"],", "= activations.softmax(w) return w def conv2d(x, config): return layers.Conv2D(filters=config['numFilters'], kernel_size=(config['convSize'], config['convSize']), activation=config[\"convActivation\"], padding='same',", "(Batch, H, W, 100) x = conv2d(x, config) # x: (Batch, H, W,", "numerical stability w = z - tf.reduce_max(z) # [0, 1] w = activations.softmax(w)", "config): return layers.Conv2D(filters=config['numFilters'], kernel_size=(config['convSize'], config['convSize']), activation=config[\"convActivation\"], padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) def conv2d_last(x,", "3): the candidate with least radius yi = candidates[:, :, :, lastCandidIdx:lastCandidIdx +", "OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL", "disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this", "image = op.inputs[0] weights = op.inputs[1] grads = _module.weighted_average_grad(grad, image, weights) grads =", "1] var = input[:, :, :, config['VARIANCE_POS']:config['VARIANCE_POS'] + config['numCandidates']] candidates = input[:, :,", "ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF", ":, i * 3:i * 3 + 3] # (B, H, W, 3)", "denoisingWeights / tf.reduce_sum(denoisingWeights, axis=-1, keepdims=True) lastCandidIdx = 3 * (config['numCandidates'] - 1) #", "H, W, kernelArea * (numCandidates-1)) denoisingWeights = activations.relu(x) + 1e-4 # to prevent", "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND", "TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE", "w = z - tf.reduce_max(z) # [0, 1] w = activations.softmax(w) return w", "H, W, numChannels) N = input[:, :, :, config['ITERATION_POS']:config['ITERATION_POS'] + 1] albedo =", "weighted_average = _module.weighted_average def kernelPredictingWeights(z): # z: (B, H, W, kernelArea) # [-inf,", "FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER", "W, numOutput) x = conv2d_last(x, config) return x def MainNet(config, input): # input:", "WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS", "= candidates[:, :, :, i * 3:i * 3 + 3] # (B,", "(B, H, W, 3) output = tf.zeros_like(albedo) for i in range(config['numCandidates'] - 1):", "return x def MainNet(config, input): # input: (B, H, W, numChannels) N =", "# (B, H, W, kernelArea) wb = denoisingWeights[:, :, :, start:end] # (B,", "config['convSize']), activation=config[\"convActivation\"], padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) def conv2d_last(x, config): return layers.Conv2D( filters=config['numOutput'],", "BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,", "# to prevent all zero denoisingWeights = denoisingWeights / tf.reduce_sum(denoisingWeights, axis=-1, keepdims=True) lastCandidIdx", "OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR", "tf from tensorflow.keras import layers, activations from tensorflow.keras.initializers import GlorotUniform, zeros _module =", "def _weighted_average_grad(op, grad): image = op.inputs[0] weights = op.inputs[1] grads = _module.weighted_average_grad(grad, image,", "as tf from tensorflow.keras import layers, activations from tensorflow.keras.initializers import GlorotUniform, zeros _module", ":, :, config['NORMAL_POS']:config['NORMAL_POS'] + 3] depth = input[:, :, :, config['DEPTH_POS']:config['DEPTH_POS'] + 1]", "W, 1) sumWeights = tf.reduce_sum(wb, axis=-1, keepdims=True) output += zb * sumWeights return", "in the documentation and/or other materials provided with the distribution. 
THIS SOFTWARE IS", "x: (Batch, H, W, 100) x = conv2d(x, config) for i in range(8):", "x: (B, H, W, numInputChannels) x = tf.concat([albedo, normal, depth, var, candidates], axis=3)", "= input[:, :, :, config['DEPTH_POS']:config['DEPTH_POS'] + 1] var = input[:, :, :, config['VARIANCE_POS']:config['VARIANCE_POS']", "and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE", "<reponame>hchoi405/dppm \"\"\" BSD 2-Clause License Copyright (c) 2021, CGLAB All rights reserved. Redistribution", "ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE", "OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY", "H, W, kernelArea) wb = denoisingWeights[:, :, :, start:end] # (B, H, W,", "100) x = conv2d(x, config) # x: (Batch, H, W, numOutput) x =", "H, W, numInputChannels) # x: (Batch, H, W, 100) x = conv2d(x, config)", "to prevent all zero denoisingWeights = denoisingWeights / tf.reduce_sum(denoisingWeights, axis=-1, keepdims=True) lastCandidIdx =", ":, config['ALBEDO_POS']:config['ALBEDO_POS'] + 3] normal = input[:, :, :, config['NORMAL_POS']:config['NORMAL_POS'] + 3] depth", "INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR", "INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT", "W, 3) output = tf.zeros_like(albedo) for i in range(config['numCandidates'] - 1): start =", "config['ITERATION_POS']:config['ITERATION_POS'] + 1] albedo = input[:, :, :, config['ALBEDO_POS']:config['ALBEDO_POS'] + 3] normal =", "# [-inf, 0], for numerical stability w = z - tf.reduce_max(z) # [0,", "+= denoised # (B, H, W, 1) sumWeights = tf.reduce_sum(wb, axis=-1, keepdims=True) output", "tensorflow.keras.initializers import GlorotUniform, zeros _module = tf.load_op_library('./_weightaverage_ops.so') @tf.RegisterGradient(\"WeightedAverage\") def _weighted_average_grad(op, grad): image =", 
"kernel_size=(config['convSize'], config['convSize']), activation=config[\"convActivation\"], padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) def conv2d_last(x, config): return layers.Conv2D(", "numInputChannels) x = tf.concat([albedo, normal, depth, var, candidates], axis=3) # x: (B, H,", "z: (B, H, W, kernelArea) # [-inf, 0], for numerical stability w =", "# (B, H, W, 3) output = tf.zeros_like(albedo) for i in range(config['numCandidates'] -", "CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES", "disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE", "= 3 * (config['numCandidates'] - 1) # (B, H, W, 3): the candidate", "this list of conditions and the following disclaimer in the documentation and/or other", "EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,", "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF", "Redistributions in binary form must reproduce the above copyright notice, this list of", "LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE", "tf.zeros_like(albedo) for i in range(config['numCandidates'] - 1): start = i * config['kernelArea'] end", "DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,", "x = conv2d(x, config) for i in range(8): # x: (Batch, H, W,", "HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT", "(i + 1) * config['kernelArea'] # (B, H, W, kernelArea) wb = denoisingWeights[:,", "tensorflow.keras import layers, activations from tensorflow.keras.initializers import GlorotUniform, zeros _module = tf.load_op_library('./_weightaverage_ops.so') @tf.RegisterGradient(\"WeightedAverage\")", "for i in range(config['numCandidates'] - 1): start = i * config['kernelArea'] end =", "rights reserved. 
Redistribution and use in source and binary forms, with or without", "import tensorflow as tf from tensorflow.keras import layers, activations from tensorflow.keras.initializers import GlorotUniform,", "the above copyright notice, this list of conditions and the following disclaimer. 2.", "IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED", ":config['CANDIDATE_POS'] + 3 * config['numCandidates']] # x: (B, H, W, numInputChannels) x =", "and the following disclaimer. 2. Redistributions in binary form must reproduce the above", "IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\" import copy import tensorflow", "\"\"\" import copy import tensorflow as tf from tensorflow.keras import layers, activations from", "w def conv2d(x, config): return layers.Conv2D(filters=config['numFilters'], kernel_size=(config['convSize'], config['convSize']), activation=config[\"convActivation\"], padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(),", "config) for i in range(8): # x: (Batch, H, W, 100) x =", "W, 100) x = conv2d(x, config) # x: (Batch, H, W, numOutput) x", "binary form must reproduce the above copyright notice, this list of conditions and", "# x: (Batch, H, W, 100) x = conv2d(x, config) # x: (Batch,", "W, kernelArea) # [-inf, 0], for numerical stability w = z - tf.reduce_max(z)", "DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF", "IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY", "EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS", "be 1 def ConvolutionNet(config, x): # x: (B, H, W, numInputChannels) # x:", "numChannels) N = input[:, :, :, config['ITERATION_POS']:config['ITERATION_POS'] + 1] albedo = input[:, :,", "input[:, :, :, config['ALBEDO_POS']:config['ALBEDO_POS'] + 3] normal = input[:, :, :, config['NORMAL_POS']:config['NORMAL_POS'] +", "return layers.Conv2D( filters=config['numOutput'], 
kernel_size=(config['convSize'], config['convSize']), padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) # Constant to", "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN", "W, numChannels) N = input[:, :, :, config['ITERATION_POS']:config['ITERATION_POS'] + 1] albedo = input[:,", "config['numCandidates']] # x: (B, H, W, numInputChannels) x = tf.concat([albedo, normal, depth, var,", "config['VARIANCE_POS']:config['VARIANCE_POS'] + config['numCandidates']] candidates = input[:, :, :, config['CANDIDATE_POS'] :config['CANDIDATE_POS'] + 3 *", "retain the above copyright notice, this list of conditions and the following disclaimer.", "denoised # (B, H, W, 1) sumWeights = tf.reduce_sum(wb, axis=-1, keepdims=True) output +=", "ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF", "BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A", "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT", "W, 3): the candidate with least radius yi = candidates[:, :, :, lastCandidIdx:lastCandidIdx", "all zero denoisingWeights = denoisingWeights / tf.reduce_sum(denoisingWeights, axis=-1, keepdims=True) lastCandidIdx = 3 *", "LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF", ":, config['VARIANCE_POS']:config['VARIANCE_POS'] + config['numCandidates']] candidates = input[:, :, :, config['CANDIDATE_POS'] :config['CANDIDATE_POS'] + 3", "reserved. Redistribution and use in source and binary forms, with or without modification,", "zero denoisingWeights = denoisingWeights / tf.reduce_sum(denoisingWeights, axis=-1, keepdims=True) lastCandidIdx = 3 * (config['numCandidates']", "ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\"\"\" import copy import tensorflow as", "= _module.weighted_average def kernelPredictingWeights(z): # z: (B, H, W, kernelArea) # [-inf, 0],", "in binary form must reproduce the above copyright notice, this list of conditions", "of conditions and the following disclaimer in the documentation and/or other materials provided", "SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,", "OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,", "x def MainNet(config, input): # input: (B, H, W, numChannels) N = input[:,", "image, weights) grads = tf.clip_by_value(grads, -1000000, 1000000) return [None, grads] weighted_average = _module.weighted_average", "H, W, numOutput) x = conv2d_last(x, config) return x def MainNet(config, input): #", "/ tf.reduce_sum(denoisingWeights, axis=-1, keepdims=True) lastCandidIdx = 3 * (config['numCandidates'] - 1) # (B,", "candidates = input[:, :, :, config['CANDIDATE_POS'] :config['CANDIDATE_POS'] + 3 * config['numCandidates']] # x:", "= candidates[:, :, :, lastCandidIdx:lastCandidIdx + 3] # (B, H, W, 3) output", "kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) def conv2d_last(x, config): return layers.Conv2D( filters=config['numOutput'], kernel_size=(config['convSize'], config['convSize']), padding='same', strides=(1, 1),", "W, 100) x = conv2d(x, config) for i in range(8): # x: (Batch,", "x = tf.concat([albedo, normal, depth, var, candidates], axis=3) # x: (B, H, W,", "# (B, H, W, 3) zb = candidates[:, :, :, i * 3:i", "THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\"\"\" import", ":, :, start:end] # (B, H, W, 3) zb = candidates[:, :, :,", "# (B, H, W, 1) sumWeights = tf.reduce_sum(wb, axis=-1, keepdims=True) output += zb", "conv2d(x, config) # x: (Batch, H, W, numOutput) x = conv2d_last(x, config) return", "code must retain the above copyright notice, this list of conditions and the", "and binary forms, with or without modification, are permitted provided that the following", "DAMAGE. \"\"\" import copy import tensorflow as tf from tensorflow.keras import layers, activations", "source code must retain the above copyright notice, this list of conditions and", "+ 3] normal = input[:, :, :, config['NORMAL_POS']:config['NORMAL_POS'] + 3] depth = input[:,", "- 1): start = i * config['kernelArea'] end = (i + 1) *", "def conv2d(x, config): return layers.Conv2D(filters=config['numFilters'], kernel_size=(config['convSize'], config['convSize']), activation=config[\"convActivation\"], padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x)", "ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE", "with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS", "(B, H, W, 1) sumWeights = tf.reduce_sum(wb, axis=-1, keepdims=True) output += zb *", "MainNet(config, input): # input: (B, H, W, numChannels) N = input[:, :, :,", "OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED", "grads] weighted_average = _module.weighted_average def kernelPredictingWeights(z): # z: (B, H, W, kernelArea) #", "candidates[:, :, :, i * 3:i * 3 + 3] # (B, H,", "config): return layers.Conv2D( filters=config['numOutput'], kernel_size=(config['convSize'], config['convSize']), padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) # Constant", "x: (Batch, H, W, numOutput) x = conv2d_last(x, config) return x def MainNet(config,", "# x: (B, H, W, numInputChannels) x = tf.concat([albedo, normal, depth, var, candidates],", "kernelArea * (numCandidates-1)) denoisingWeights = activations.relu(x) + 1e-4 # to prevent all zero", "in range(8): # x: (Batch, H, W, 100) x = conv2d(x, config) #", "OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF", "\"\"\" BSD 2-Clause License Copyright (c) 2021, CGLAB All rights reserved. Redistribution and", "# (B, H, W, kernelArea * (numCandidates-1)) denoisingWeights = activations.relu(x) + 1e-4 #", "denoisingWeights[:, :, :, start:end] # (B, H, W, 3) zb = candidates[:, :,", "lastCandidIdx:lastCandidIdx + 3] # (B, H, W, 3) output = tf.zeros_like(albedo) for i", "IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY", "ConvolutionNet(config, x): # x: (B, H, W, numInputChannels) # x: (Batch, H, W,", "(B, H, W, numInputChannels) x = tf.concat([albedo, normal, depth, var, candidates], axis=3) #", "to make initial radius to be 1 def ConvolutionNet(config, x): # x: (B,", "THE POSSIBILITY OF SUCH DAMAGE. 
\"\"\" import copy import tensorflow as tf from", "= activations.relu(x) + 1e-4 # to prevent all zero denoisingWeights = denoisingWeights /", "are permitted provided that the following conditions are met: 1. Redistributions of source", "_module.weighted_average def kernelPredictingWeights(z): # z: (B, H, W, kernelArea) # [-inf, 0], for", "without modification, are permitted provided that the following conditions are met: 1. Redistributions", "conditions are met: 1. Redistributions of source code must retain the above copyright", "the candidate with least radius yi = candidates[:, :, :, lastCandidIdx:lastCandidIdx + 3]", "+ 3] depth = input[:, :, :, config['DEPTH_POS']:config['DEPTH_POS'] + 1] var = input[:,", "CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY", "FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT", "3) output = tf.zeros_like(albedo) for i in range(config['numCandidates'] - 1): start = i", "(B, H, W, kernelArea) wb = denoisingWeights[:, :, :, start:end] # (B, H,", "conv2d_last(x, config) return x def MainNet(config, input): # input: (B, H, W, numChannels)", "= z - tf.reduce_max(z) # [0, 1] w = activations.softmax(w) return w def", ":, config['ITERATION_POS']:config['ITERATION_POS'] + 1] albedo = input[:, :, :, config['ALBEDO_POS']:config['ALBEDO_POS'] + 3] normal", "BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,", "\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "3 + 3] # (B, H, W, 3) denoised = weighted_average(yi - zb,", "AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT", "x): # x: (B, H, W, numInputChannels) # x: (Batch, H, W, 100)", ":, config['NORMAL_POS']:config['NORMAL_POS'] + 3] depth = input[:, :, :, config['DEPTH_POS']:config['DEPTH_POS'] + 1] var", "W, 3) zb = candidates[:, :, :, i * 3:i * 3 +", "copy import tensorflow as tf from tensorflow.keras import layers, activations 
from tensorflow.keras.initializers import", "(Batch, H, W, 100) x = conv2d(x, config) for i in range(8): #", "provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND", "form must reproduce the above copyright notice, this list of conditions and the", "ConvolutionNet(config, x) # (B, H, W, kernelArea * (numCandidates-1)) denoisingWeights = activations.relu(x) +", "= input[:, :, :, config['ITERATION_POS']:config['ITERATION_POS'] + 1] albedo = input[:, :, :, config['ALBEDO_POS']:config['ALBEDO_POS']", "above copyright notice, this list of conditions and the following disclaimer in the", "_module.weighted_average_grad(grad, image, weights) grads = tf.clip_by_value(grads, -1000000, 1000000) return [None, grads] weighted_average =", "1. Redistributions of source code must retain the above copyright notice, this list", "OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \"\"\"", "and use in source and binary forms, with or without modification, are permitted", "use in source and binary forms, with or without modification, are permitted provided", "return w def conv2d(x, config): return layers.Conv2D(filters=config['numFilters'], kernel_size=(config['convSize'], config['convSize']), activation=config[\"convActivation\"], padding='same', strides=(1, 1),", "copyright notice, this list of conditions and the following disclaimer in the documentation", ":, :, config['DEPTH_POS']:config['DEPTH_POS'] + 1] var = input[:, :, :, config['VARIANCE_POS']:config['VARIANCE_POS'] + config['numCandidates']]", "numOutput) x = ConvolutionNet(config, x) # (B, H, W, kernelArea * (numCandidates-1)) denoisingWeights", "IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS", "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR", ":, :, config['ALBEDO_POS']:config['ALBEDO_POS'] + 3] normal = input[:, :, :, config['NORMAL_POS']:config['NORMAL_POS'] + 3]", "in 
range(config['numCandidates'] - 1): start = i * config['kernelArea'] end = (i +", "lastCandidIdx = 3 * (config['numCandidates'] - 1) # (B, H, W, 3): the", "provided that the following conditions are met: 1. Redistributions of source code must", "CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR", "= tf.load_op_library('./_weightaverage_ops.so') @tf.RegisterGradient(\"WeightedAverage\") def _weighted_average_grad(op, grad): image = op.inputs[0] weights = op.inputs[1] grads", "NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,", "config['kernelArea'] end = (i + 1) * config['kernelArea'] # (B, H, W, kernelArea)", "BSD 2-Clause License Copyright (c) 2021, CGLAB All rights reserved. Redistribution and use", "DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS", "ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING", "= denoisingWeights / tf.reduce_sum(denoisingWeights, axis=-1, keepdims=True) lastCandidIdx = 3 * (config['numCandidates'] - 1)", "* (numCandidates-1)) denoisingWeights = activations.relu(x) + 1e-4 # to prevent all zero denoisingWeights", "other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT", "A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR", "N = input[:, :, :, config['ITERATION_POS']:config['ITERATION_POS'] + 1] albedo = input[:, :, :,", "are met: 1. Redistributions of source code must retain the above copyright notice,", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO", "PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE", "= tf.clip_by_value(grads, -1000000, 1000000) return [None, grads] weighted_average = _module.weighted_average def kernelPredictingWeights(z): #", "* 3:i * 3 + 3] # (B, H, W, 3) denoised =", "# Constant to make initial radius to be 1 def ConvolutionNet(config, x): #", "SUCH DAMAGE. \"\"\" import copy import tensorflow as tf from tensorflow.keras import layers,", "source and binary forms, with or without modification, are permitted provided that the", "Constant to make initial radius to be 1 def ConvolutionNet(config, x): # x:", "SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)", "SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND", "conv2d_last(x, config): return layers.Conv2D( filters=config['numOutput'], kernel_size=(config['convSize'], config['convSize']), padding='same', strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) #", "(B, H, W, 3): the candidate with least radius yi = candidates[:, :,", "OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)", "denoised = weighted_average(yi - zb, wb) output += denoised # (B, H, W,", "to be 1 def ConvolutionNet(config, x): # x: (B, H, W, numInputChannels) #", "H, W, 3): the candidate with least radius yi = candidates[:, :, :,", "WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE", "config['kernelArea'] # (B, H, W, kernelArea) wb = denoisingWeights[:, :, :, start:end] #", "output = tf.zeros_like(albedo) for i in range(config['numCandidates'] - 1): start = i *", "prevent all zero denoisingWeights = denoisingWeights / tf.reduce_sum(denoisingWeights, axis=-1, keepdims=True) lastCandidIdx = 3", "License Copyright (c) 2021, CGLAB All rights reserved. 
Redistribution and use in source", "3) zb = candidates[:, :, :, i * 3:i * 3 + 3]", "def ConvolutionNet(config, x): # x: (B, H, W, numInputChannels) # x: (Batch, H,", "import GlorotUniform, zeros _module = tf.load_op_library('./_weightaverage_ops.so') @tf.RegisterGradient(\"WeightedAverage\") def _weighted_average_grad(op, grad): image = op.inputs[0]", "3] depth = input[:, :, :, config['DEPTH_POS']:config['DEPTH_POS'] + 1] var = input[:, :,", "NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS", "(c) 2021, CGLAB All rights reserved. Redistribution and use in source and binary", "= op.inputs[0] weights = op.inputs[1] grads = _module.weighted_average_grad(grad, image, weights) grads = tf.clip_by_value(grads,", "grad): image = op.inputs[0] weights = op.inputs[1] grads = _module.weighted_average_grad(grad, image, weights) grads", "+ 1e-4 # to prevent all zero denoisingWeights = denoisingWeights / tf.reduce_sum(denoisingWeights, axis=-1,", "USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.", "(B, H, W, kernelArea * (numCandidates-1)) denoisingWeights = activations.relu(x) + 1e-4 # to", "AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED", "+ 1] albedo = input[:, :, :, config['ALBEDO_POS']:config['ALBEDO_POS'] + 3] normal = input[:,", "axis=3) # x: (B, H, W, numOutput) x = ConvolutionNet(config, x) # (B,", "# x: (B, H, W, numInputChannels) # x: (Batch, H, W, 100) x", "zb, wb) output += denoised # (B, H, W, 1) sumWeights = tf.reduce_sum(wb,", "x: (Batch, H, W, 100) x = conv2d(x, config) # x: (Batch, H,", "SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\"\"\" import copy", "depth = input[:, :, :, config['DEPTH_POS']:config['DEPTH_POS'] + 1] var = input[:, :, :,", "1] w = activations.softmax(w) return w def conv2d(x, config): return layers.Conv2D(filters=config['numFilters'], kernel_size=(config['convSize'], config['convSize']),", "(config['numCandidates'] - 1) # (B, H, W, 3): the candidate with least radius", "WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN", "kernelArea) # [-inf, 0], for numerical stability w = z - tf.reduce_max(z) #", "config['NORMAL_POS']:config['NORMAL_POS'] + 3] depth = input[:, :, :, config['DEPTH_POS']:config['DEPTH_POS'] + 1] var =", "3:i * 3 + 3] # (B, H, W, 3) denoised = weighted_average(yi", "input[:, :, :, config['NORMAL_POS']:config['NORMAL_POS'] + 3] depth = input[:, :, :, config['DEPTH_POS']:config['DEPTH_POS'] +", "H, W, 100) x = conv2d(x, config) # x: (Batch, H, W, numOutput)", "# x: (Batch, H, W, numOutput) x = conv2d_last(x, config) return x def", "input: (B, H, W, numChannels) N = input[:, :, :, config['ITERATION_POS']:config['ITERATION_POS'] + 1]", ":, :, config['VARIANCE_POS']:config['VARIANCE_POS'] + config['numCandidates']] candidates = input[:, :, :, config['CANDIDATE_POS'] :config['CANDIDATE_POS'] +", "* config['numCandidates']] # x: (B, H, W, numInputChannels) x = tf.concat([albedo, normal, depth,", "1) # (B, H, W, 3): the candidate with least radius yi =", "end = (i + 1) * config['kernelArea'] # (B, H, W, kernelArea) wb", "[None, grads] weighted_average = _module.weighted_average def kernelPredictingWeights(z): # z: (B, H, W, kernelArea)", "OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN", "denoisingWeights = denoisingWeights / tf.reduce_sum(denoisingWeights, axis=-1, keepdims=True) lastCandidIdx = 3 * (config['numCandidates'] -", "x = conv2d(x, config) # x: (Batch, H, W, numOutput) x = conv2d_last(x,", "OF SUCH DAMAGE. 
\"\"\" import copy import tensorflow as tf from tensorflow.keras import", "input[:, :, :, config['CANDIDATE_POS'] :config['CANDIDATE_POS'] + 3 * config['numCandidates']] # x: (B, H,", "AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE", ":, config['DEPTH_POS']:config['DEPTH_POS'] + 1] var = input[:, :, :, config['VARIANCE_POS']:config['VARIANCE_POS'] + config['numCandidates']] candidates", "-1000000, 1000000) return [None, grads] weighted_average = _module.weighted_average def kernelPredictingWeights(z): # z: (B,", "activations.relu(x) + 1e-4 # to prevent all zero denoisingWeights = denoisingWeights / tf.reduce_sum(denoisingWeights,", "H, W, numOutput) x = ConvolutionNet(config, x) # (B, H, W, kernelArea *", "(B, H, W, 3) zb = candidates[:, :, :, i * 3:i *", "z - tf.reduce_max(z) # [0, 1] w = activations.softmax(w) return w def conv2d(x,", "- 1) # (B, H, W, 3): the candidate with least radius yi", "1 def ConvolutionNet(config, x): # x: (B, H, W, numInputChannels) # x: (Batch,", "W, numInputChannels) x = tf.concat([albedo, normal, depth, var, candidates], axis=3) # x: (B,", "initial radius to be 1 def ConvolutionNet(config, x): # x: (B, H, W,", "= input[:, :, :, config['ALBEDO_POS']:config['ALBEDO_POS'] + 3] normal = input[:, :, :, config['NORMAL_POS']:config['NORMAL_POS']", "strides=(1, 1), kernel_initializer=GlorotUniform(), bias_initializer=zeros())(x) def conv2d_last(x, config): return layers.Conv2D( filters=config['numOutput'], kernel_size=(config['convSize'], config['convSize']), padding='same',", "the following conditions are met: 1. Redistributions of source code must retain the", "+ 3] # (B, H, W, 3) output = tf.zeros_like(albedo) for i in" ]
[ "super(OJAIList, self).__init__() @staticmethod def set_list(value, tags=False): from mapr.ojai.ojai.OJAITagsBuilder import OJAITagsBuilder ojai_list = []", "elem in value: if isinstance(elem, list): if isinstance(dump_document, OJAITagsBuilder): nested_list = OJAIList.set_list(elem, tags=True)", "[] if tags: dump_document = OJAITagsBuilder() else: from mapr.ojai.ojai.OJAIDocument import OJAIDocument dump_document =", "list(elem.items()): if isinstance(v, list): tmp_dict[k] = OJAIList.set_list(v) else: internal_value = dump_document.set('dump', v).as_dictionary()['dump'] tmp_dict[k]", "mapr.ojai.ojai.OJAIDocument import OJAIDocument dump_document = OJAIDocument() for elem in value: if isinstance(elem, list):", "if isinstance(v, list): tmp_dict[k] = OJAIList.set_list(v) else: internal_value = dump_document.set('dump', v).as_dictionary()['dump'] tmp_dict[k] =", "__future__ import division from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from", "in value: if isinstance(elem, list): if isinstance(dump_document, OJAITagsBuilder): nested_list = OJAIList.set_list(elem, tags=True) else:", "from mapr.ojai.ojai.OJAIDocument import OJAIDocument dump_document = OJAIDocument() for elem in value: if isinstance(elem,", "import OJAIDocument dump_document = OJAIDocument() for elem in value: if isinstance(elem, list): if", "import standard_library standard_library.install_aliases() from builtins import * class OJAIList(list): def __init__(self): super(OJAIList, self).__init__()", "isinstance(dump_document, OJAITagsBuilder): nested_list = OJAIList.set_list(elem, tags=True) else: nested_list = OJAIList.set_list(elem) ojai_list.append(nested_list) elif isinstance(elem,", "division from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins import", "= dump_document.set('dump', v).as_dictionary()['dump'] tmp_dict[k] = internal_value dump_document.clear() 
ojai_list.append(tmp_dict) else: ojai_list.append(dump_document.set('dump', elem).as_dictionary()['dump']) dump_document.clear() return", "nested_list = OJAIList.set_list(elem, tags=True) else: nested_list = OJAIList.set_list(elem) ojai_list.append(nested_list) elif isinstance(elem, dict) and", "ojai_list.append(nested_list) elif isinstance(elem, dict) and bool(elem): tmp_dict = {} for k, v in", "import absolute_import from future import standard_library standard_library.install_aliases() from builtins import * class OJAIList(list):", "k, v in list(elem.items()): if isinstance(v, list): tmp_dict[k] = OJAIList.set_list(v) else: internal_value =", "tags=False): from mapr.ojai.ojai.OJAITagsBuilder import OJAITagsBuilder ojai_list = [] if tags: dump_document = OJAITagsBuilder()", "from future import standard_library standard_library.install_aliases() from builtins import * class OJAIList(list): def __init__(self):", "@staticmethod def set_list(value, tags=False): from mapr.ojai.ojai.OJAITagsBuilder import OJAITagsBuilder ojai_list = [] if tags:", "= OJAIList.set_list(elem, tags=True) else: nested_list = OJAIList.set_list(elem) ojai_list.append(nested_list) elif isinstance(elem, dict) and bool(elem):", "__future__ import print_function from __future__ import division from __future__ import absolute_import from future", "if tags: dump_document = OJAITagsBuilder() else: from mapr.ojai.ojai.OJAIDocument import OJAIDocument dump_document = OJAIDocument()", "bool(elem): tmp_dict = {} for k, v in list(elem.items()): if isinstance(v, list): tmp_dict[k]", "= {} for k, v in list(elem.items()): if isinstance(v, list): tmp_dict[k] = OJAIList.set_list(v)", "import division from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins", "class OJAIList(list): def __init__(self): super(OJAIList, self).__init__() @staticmethod def set_list(value, tags=False): from mapr.ojai.ojai.OJAITagsBuilder import", "from __future__ 
import division from __future__ import absolute_import from future import standard_library standard_library.install_aliases()", "if isinstance(elem, list): if isinstance(dump_document, OJAITagsBuilder): nested_list = OJAIList.set_list(elem, tags=True) else: nested_list =", "def set_list(value, tags=False): from mapr.ojai.ojai.OJAITagsBuilder import OJAITagsBuilder ojai_list = [] if tags: dump_document", "= OJAIList.set_list(v) else: internal_value = dump_document.set('dump', v).as_dictionary()['dump'] tmp_dict[k] = internal_value dump_document.clear() ojai_list.append(tmp_dict) else:", "OJAIList.set_list(v) else: internal_value = dump_document.set('dump', v).as_dictionary()['dump'] tmp_dict[k] = internal_value dump_document.clear() ojai_list.append(tmp_dict) else: ojai_list.append(dump_document.set('dump',", "from __future__ import unicode_literals from __future__ import print_function from __future__ import division from", "OJAIDocument dump_document = OJAIDocument() for elem in value: if isinstance(elem, list): if isinstance(dump_document,", "from __future__ import print_function from __future__ import division from __future__ import absolute_import from", "isinstance(elem, dict) and bool(elem): tmp_dict = {} for k, v in list(elem.items()): if", "__future__ import unicode_literals from __future__ import print_function from __future__ import division from __future__", "ojai_list = [] if tags: dump_document = OJAITagsBuilder() else: from mapr.ojai.ojai.OJAIDocument import OJAIDocument", "dict) and bool(elem): tmp_dict = {} for k, v in list(elem.items()): if isinstance(v,", "standard_library.install_aliases() from builtins import * class OJAIList(list): def __init__(self): super(OJAIList, self).__init__() @staticmethod def", "* class OJAIList(list): def __init__(self): super(OJAIList, self).__init__() @staticmethod def set_list(value, tags=False): from mapr.ojai.ojai.OJAITagsBuilder", "builtins import * class OJAIList(list): def __init__(self): super(OJAIList, 
self).__init__() @staticmethod def set_list(value, tags=False):", "for elem in value: if isinstance(elem, list): if isinstance(dump_document, OJAITagsBuilder): nested_list = OJAIList.set_list(elem,", "= OJAIList.set_list(elem) ojai_list.append(nested_list) elif isinstance(elem, dict) and bool(elem): tmp_dict = {} for k,", "import * class OJAIList(list): def __init__(self): super(OJAIList, self).__init__() @staticmethod def set_list(value, tags=False): from", "tags: dump_document = OJAITagsBuilder() else: from mapr.ojai.ojai.OJAIDocument import OJAIDocument dump_document = OJAIDocument() for", "if isinstance(dump_document, OJAITagsBuilder): nested_list = OJAIList.set_list(elem, tags=True) else: nested_list = OJAIList.set_list(elem) ojai_list.append(nested_list) elif", "OJAITagsBuilder): nested_list = OJAIList.set_list(elem, tags=True) else: nested_list = OJAIList.set_list(elem) ojai_list.append(nested_list) elif isinstance(elem, dict)", "{} for k, v in list(elem.items()): if isinstance(v, list): tmp_dict[k] = OJAIList.set_list(v) else:", "= OJAIDocument() for elem in value: if isinstance(elem, list): if isinstance(dump_document, OJAITagsBuilder): nested_list", "print_function from __future__ import division from __future__ import absolute_import from future import standard_library", "else: internal_value = dump_document.set('dump', v).as_dictionary()['dump'] tmp_dict[k] = internal_value dump_document.clear() ojai_list.append(tmp_dict) else: ojai_list.append(dump_document.set('dump', elem).as_dictionary()['dump'])", "absolute_import from future import standard_library standard_library.install_aliases() from builtins import * class OJAIList(list): def", "= [] if tags: dump_document = OJAITagsBuilder() else: from mapr.ojai.ojai.OJAIDocument import OJAIDocument dump_document", "= OJAITagsBuilder() else: from mapr.ojai.ojai.OJAIDocument import OJAIDocument dump_document = OJAIDocument() for elem in", "dump_document.set('dump', v).as_dictionary()['dump'] tmp_dict[k] = 
internal_value dump_document.clear() ojai_list.append(tmp_dict) else: ojai_list.append(dump_document.set('dump', elem).as_dictionary()['dump']) dump_document.clear() return ojai_list", "__future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins import * class", "OJAIDocument() for elem in value: if isinstance(elem, list): if isinstance(dump_document, OJAITagsBuilder): nested_list =", "import unicode_literals from __future__ import print_function from __future__ import division from __future__ import", "from builtins import * class OJAIList(list): def __init__(self): super(OJAIList, self).__init__() @staticmethod def set_list(value,", "import OJAITagsBuilder ojai_list = [] if tags: dump_document = OJAITagsBuilder() else: from mapr.ojai.ojai.OJAIDocument", "isinstance(elem, list): if isinstance(dump_document, OJAITagsBuilder): nested_list = OJAIList.set_list(elem, tags=True) else: nested_list = OJAIList.set_list(elem)", "from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins import *", "OJAIList(list): def __init__(self): super(OJAIList, self).__init__() @staticmethod def set_list(value, tags=False): from mapr.ojai.ojai.OJAITagsBuilder import OJAITagsBuilder", "__init__(self): super(OJAIList, self).__init__() @staticmethod def set_list(value, tags=False): from mapr.ojai.ojai.OJAITagsBuilder import OJAITagsBuilder ojai_list =", "OJAITagsBuilder() else: from mapr.ojai.ojai.OJAIDocument import OJAIDocument dump_document = OJAIDocument() for elem in value:", "in list(elem.items()): if isinstance(v, list): tmp_dict[k] = OJAIList.set_list(v) else: internal_value = dump_document.set('dump', v).as_dictionary()['dump']", "tmp_dict = {} for k, v in list(elem.items()): if isinstance(v, list): tmp_dict[k] =", "set_list(value, tags=False): from mapr.ojai.ojai.OJAITagsBuilder import OJAITagsBuilder ojai_list = [] if tags: dump_document =", 
"OJAITagsBuilder ojai_list = [] if tags: dump_document = OJAITagsBuilder() else: from mapr.ojai.ojai.OJAIDocument import", "for k, v in list(elem.items()): if isinstance(v, list): tmp_dict[k] = OJAIList.set_list(v) else: internal_value", "from mapr.ojai.ojai.OJAITagsBuilder import OJAITagsBuilder ojai_list = [] if tags: dump_document = OJAITagsBuilder() else:", "v in list(elem.items()): if isinstance(v, list): tmp_dict[k] = OJAIList.set_list(v) else: internal_value = dump_document.set('dump',", "tmp_dict[k] = OJAIList.set_list(v) else: internal_value = dump_document.set('dump', v).as_dictionary()['dump'] tmp_dict[k] = internal_value dump_document.clear() ojai_list.append(tmp_dict)", "tags=True) else: nested_list = OJAIList.set_list(elem) ojai_list.append(nested_list) elif isinstance(elem, dict) and bool(elem): tmp_dict =", "import print_function from __future__ import division from __future__ import absolute_import from future import", "value: if isinstance(elem, list): if isinstance(dump_document, OJAITagsBuilder): nested_list = OJAIList.set_list(elem, tags=True) else: nested_list", "isinstance(v, list): tmp_dict[k] = OJAIList.set_list(v) else: internal_value = dump_document.set('dump', v).as_dictionary()['dump'] tmp_dict[k] = internal_value", "else: from mapr.ojai.ojai.OJAIDocument import OJAIDocument dump_document = OJAIDocument() for elem in value: if", "OJAIList.set_list(elem) ojai_list.append(nested_list) elif isinstance(elem, dict) and bool(elem): tmp_dict = {} for k, v", "else: nested_list = OJAIList.set_list(elem) ojai_list.append(nested_list) elif isinstance(elem, dict) and bool(elem): tmp_dict = {}", "dump_document = OJAIDocument() for elem in value: if isinstance(elem, list): if isinstance(dump_document, OJAITagsBuilder):", "mapr.ojai.ojai.OJAITagsBuilder import OJAITagsBuilder ojai_list = [] if tags: dump_document = OJAITagsBuilder() else: from", "unicode_literals from __future__ import print_function from __future__ import division from __future__ 
import absolute_import", "def __init__(self): super(OJAIList, self).__init__() @staticmethod def set_list(value, tags=False): from mapr.ojai.ojai.OJAITagsBuilder import OJAITagsBuilder ojai_list", "list): tmp_dict[k] = OJAIList.set_list(v) else: internal_value = dump_document.set('dump', v).as_dictionary()['dump'] tmp_dict[k] = internal_value dump_document.clear()", "future import standard_library standard_library.install_aliases() from builtins import * class OJAIList(list): def __init__(self): super(OJAIList,", "list): if isinstance(dump_document, OJAITagsBuilder): nested_list = OJAIList.set_list(elem, tags=True) else: nested_list = OJAIList.set_list(elem) ojai_list.append(nested_list)", "OJAIList.set_list(elem, tags=True) else: nested_list = OJAIList.set_list(elem) ojai_list.append(nested_list) elif isinstance(elem, dict) and bool(elem): tmp_dict", "and bool(elem): tmp_dict = {} for k, v in list(elem.items()): if isinstance(v, list):", "elif isinstance(elem, dict) and bool(elem): tmp_dict = {} for k, v in list(elem.items()):", "standard_library standard_library.install_aliases() from builtins import * class OJAIList(list): def __init__(self): super(OJAIList, self).__init__() @staticmethod", "self).__init__() @staticmethod def set_list(value, tags=False): from mapr.ojai.ojai.OJAITagsBuilder import OJAITagsBuilder ojai_list = [] if", "dump_document = OJAITagsBuilder() else: from mapr.ojai.ojai.OJAIDocument import OJAIDocument dump_document = OJAIDocument() for elem", "nested_list = OJAIList.set_list(elem) ojai_list.append(nested_list) elif isinstance(elem, dict) and bool(elem): tmp_dict = {} for", "internal_value = dump_document.set('dump', v).as_dictionary()['dump'] tmp_dict[k] = internal_value dump_document.clear() ojai_list.append(tmp_dict) else: ojai_list.append(dump_document.set('dump', elem).as_dictionary()['dump']) dump_document.clear()" ]
[ "estimate_msm from estimagic.estimation.msm_weighting import get_moments_cov from estimagic.inference.bootstrap import bootstrap from estimagic.optimization.optimize import maximize", "import first_derivative from estimagic.estimation.estimate_msm import estimate_msm from estimagic.estimation.msm_weighting import get_moments_cov from estimagic.inference.bootstrap import", "first_derivative from estimagic.estimation.estimate_msm import estimate_msm from estimagic.estimation.msm_weighting import get_moments_cov from estimagic.inference.bootstrap import bootstrap", "maximize from estimagic.optimization.optimize import minimize __version__ = \"0.1.4\" __all__ = [ \"maximize\", \"minimize\",", "from estimagic.optimization.optimize import minimize __version__ = \"0.1.4\" __all__ = [ \"maximize\", \"minimize\", \"utilities\",", "import bootstrap from estimagic.optimization.optimize import maximize from estimagic.optimization.optimize import minimize __version__ = \"0.1.4\"", "import utilities from estimagic.differentiation.derivatives import first_derivative from estimagic.estimation.estimate_msm import estimate_msm from estimagic.estimation.msm_weighting import", "import estimate_msm from estimagic.estimation.msm_weighting import get_moments_cov from estimagic.inference.bootstrap import bootstrap from estimagic.optimization.optimize import", "bootstrap from estimagic.optimization.optimize import maximize from estimagic.optimization.optimize import minimize __version__ = \"0.1.4\" __all__", "utilities from estimagic.differentiation.derivatives import first_derivative from estimagic.estimation.estimate_msm import estimate_msm from estimagic.estimation.msm_weighting import get_moments_cov", "import minimize __version__ = \"0.1.4\" __all__ = [ \"maximize\", \"minimize\", \"utilities\", \"first_derivative\", \"bootstrap\",", "estimagic.differentiation.derivatives import first_derivative from estimagic.estimation.estimate_msm import estimate_msm from 
estimagic.estimation.msm_weighting import get_moments_cov from estimagic.inference.bootstrap", "from estimagic.inference.bootstrap import bootstrap from estimagic.optimization.optimize import maximize from estimagic.optimization.optimize import minimize __version__", "estimagic import utilities from estimagic.differentiation.derivatives import first_derivative from estimagic.estimation.estimate_msm import estimate_msm from estimagic.estimation.msm_weighting", "estimagic.optimization.optimize import maximize from estimagic.optimization.optimize import minimize __version__ = \"0.1.4\" __all__ = [", "minimize __version__ = \"0.1.4\" __all__ = [ \"maximize\", \"minimize\", \"utilities\", \"first_derivative\", \"bootstrap\", \"estimate_msm\",", "from estimagic.estimation.estimate_msm import estimate_msm from estimagic.estimation.msm_weighting import get_moments_cov from estimagic.inference.bootstrap import bootstrap from", "__version__ = \"0.1.4\" __all__ = [ \"maximize\", \"minimize\", \"utilities\", \"first_derivative\", \"bootstrap\", \"estimate_msm\", \"get_moments_cov\",", "estimagic.estimation.estimate_msm import estimate_msm from estimagic.estimation.msm_weighting import get_moments_cov from estimagic.inference.bootstrap import bootstrap from estimagic.optimization.optimize", "estimagic.optimization.optimize import minimize __version__ = \"0.1.4\" __all__ = [ \"maximize\", \"minimize\", \"utilities\", \"first_derivative\",", "get_moments_cov from estimagic.inference.bootstrap import bootstrap from estimagic.optimization.optimize import maximize from estimagic.optimization.optimize import minimize", "import maximize from estimagic.optimization.optimize import minimize __version__ = \"0.1.4\" __all__ = [ \"maximize\",", "from estimagic.optimization.optimize import maximize from estimagic.optimization.optimize import minimize __version__ = \"0.1.4\" __all__ =", "import get_moments_cov from estimagic.inference.bootstrap import bootstrap from 
estimagic.optimization.optimize import maximize from estimagic.optimization.optimize import", "estimagic.inference.bootstrap import bootstrap from estimagic.optimization.optimize import maximize from estimagic.optimization.optimize import minimize __version__ =", "= \"0.1.4\" __all__ = [ \"maximize\", \"minimize\", \"utilities\", \"first_derivative\", \"bootstrap\", \"estimate_msm\", \"get_moments_cov\", ]", "from estimagic.differentiation.derivatives import first_derivative from estimagic.estimation.estimate_msm import estimate_msm from estimagic.estimation.msm_weighting import get_moments_cov from", "from estimagic.estimation.msm_weighting import get_moments_cov from estimagic.inference.bootstrap import bootstrap from estimagic.optimization.optimize import maximize from", "estimagic.estimation.msm_weighting import get_moments_cov from estimagic.inference.bootstrap import bootstrap from estimagic.optimization.optimize import maximize from estimagic.optimization.optimize", "from estimagic import utilities from estimagic.differentiation.derivatives import first_derivative from estimagic.estimation.estimate_msm import estimate_msm from" ]
[ "k): res = s[:k] l_res = k cur = res letters = set(cur)", "actual = longest_k(*test) message = \"Failed test {0}\\ngot {1}\" \\ \" expected {2}\".format(test,", "set(cur) num_letters = len(letters) s_size = len(s) i = k while i <", "cur += chunk_size * letter i += chunk_size l_cur = len(cur) if l_cur", "len(s) i = k while i < len(s): letter = s[i] if letter", "'__main__': tests = [[\"abcba\", 2], [\"contains\", 4], ] answers = [\"bcb\", \"ntain\", ]", "k = 2, the longest substring with k distinct characters is \"bcb\". \"\"\"", "- len(tmp) cur += chunk_size * letter i += chunk_size l_cur = len(cur)", "the longest substring that contains at most k distinct characters. For example, given", "= s[:k] l_res = k cur = res letters = set(cur) num_letters =", "= res letters = set(cur) num_letters = len(letters) s_size = len(s) i =", "l_cur > l_res: l_res = l_cur res = cur return res if __name__", "in zip(tests, answers): actual = longest_k(*test) message = \"Failed test {0}\\ngot {1}\" \\", "s[i] if letter not in letters and num_letters >= k: letters.remove(cur[0]) cur =", "num_letters += 1 letters.update(letter) tmp = s[i:].lstrip(letter) chunk_size = s_size - i -", "letter i += chunk_size l_cur = len(cur) if l_cur > l_res: l_res =", "characters. For example, given s = \"abcba\" and k = 2, the longest", "\"bcb\". \"\"\" def longest_k(s, k): res = s[:k] l_res = k cur =", "an integer k and a string s, find the length of the longest", "distinct characters. 
For example, given s = \"abcba\" and k = 2, the", "1 letters.update(letter) tmp = s[i:].lstrip(letter) chunk_size = s_size - i - len(tmp) cur", "= len(cur) if l_cur > l_res: l_res = l_cur res = cur return", "example, given s = \"abcba\" and k = 2, the longest substring with", "= len(letters) s_size = len(s) i = k while i < len(s): letter", "len(s): letter = s[i] if letter not in letters and num_letters >= k:", "\"\"\" def longest_k(s, k): res = s[:k] l_res = k cur = res", "len(letters) s_size = len(s) i = k while i < len(s): letter =", "res = s[:k] l_res = k cur = res letters = set(cur) num_letters", "in letters and num_letters >= k: letters.remove(cur[0]) cur = cur.lstrip(cur[0]) else: num_letters +=", "k distinct characters. For example, given s = \"abcba\" and k = 2,", "chunk_size = s_size - i - len(tmp) cur += chunk_size * letter i", "2], [\"contains\", 4], ] answers = [\"bcb\", \"ntain\", ] for test, answer in", "letters.update(letter) tmp = s[i:].lstrip(letter) chunk_size = s_size - i - len(tmp) cur +=", "tests = [[\"abcba\", 2], [\"contains\", 4], ] answers = [\"bcb\", \"ntain\", ] for", "most k distinct characters. 
For example, given s = \"abcba\" and k =", "not in letters and num_letters >= k: letters.remove(cur[0]) cur = cur.lstrip(cur[0]) else: num_letters", "longest_k(*test) message = \"Failed test {0}\\ngot {1}\" \\ \" expected {2}\".format(test, actual, answer)", "cur = cur.lstrip(cur[0]) else: num_letters += 1 letters.update(letter) tmp = s[i:].lstrip(letter) chunk_size =", "= k cur = res letters = set(cur) num_letters = len(letters) s_size =", "message = \"Failed test {0}\\ngot {1}\" \\ \" expected {2}\".format(test, actual, answer) assert", "if __name__ == '__main__': tests = [[\"abcba\", 2], [\"contains\", 4], ] answers =", "= \"abcba\" and k = 2, the longest substring with k distinct characters", "letter = s[i] if letter not in letters and num_letters >= k: letters.remove(cur[0])", "= 2, the longest substring with k distinct characters is \"bcb\". \"\"\" def", "res letters = set(cur) num_letters = len(letters) s_size = len(s) i = k", "while i < len(s): letter = s[i] if letter not in letters and", "= cur.lstrip(cur[0]) else: num_letters += 1 letters.update(letter) tmp = s[i:].lstrip(letter) chunk_size = s_size", "contains at most k distinct characters. For example, given s = \"abcba\" and", "a string s, find the length of the longest substring that contains at", "l_res: l_res = l_cur res = cur return res if __name__ == '__main__':", "l_cur res = cur return res if __name__ == '__main__': tests = [[\"abcba\",", "distinct characters is \"bcb\". \"\"\" def longest_k(s, k): res = s[:k] l_res =", "== '__main__': tests = [[\"abcba\", 2], [\"contains\", 4], ] answers = [\"bcb\", \"ntain\",", "the longest substring with k distinct characters is \"bcb\". 
\"\"\" def longest_k(s, k):", "i < len(s): letter = s[i] if letter not in letters and num_letters", "else: num_letters += 1 letters.update(letter) tmp = s[i:].lstrip(letter) chunk_size = s_size - i", "cur = res letters = set(cur) num_letters = len(letters) s_size = len(s) i", "= longest_k(*test) message = \"Failed test {0}\\ngot {1}\" \\ \" expected {2}\".format(test, actual,", "zip(tests, answers): actual = longest_k(*test) message = \"Failed test {0}\\ngot {1}\" \\ \"", "answer in zip(tests, answers): actual = longest_k(*test) message = \"Failed test {0}\\ngot {1}\"", "longest substring with k distinct characters is \"bcb\". \"\"\" def longest_k(s, k): res", "k distinct characters is \"bcb\". \"\"\" def longest_k(s, k): res = s[:k] l_res", "+= 1 letters.update(letter) tmp = s[i:].lstrip(letter) chunk_size = s_size - i - len(tmp)", "s[i:].lstrip(letter) chunk_size = s_size - i - len(tmp) cur += chunk_size * letter", "answers): actual = longest_k(*test) message = \"Failed test {0}\\ngot {1}\" \\ \" expected", "res if __name__ == '__main__': tests = [[\"abcba\", 2], [\"contains\", 4], ] answers", "letter not in letters and num_letters >= k: letters.remove(cur[0]) cur = cur.lstrip(cur[0]) else:", "= len(s) i = k while i < len(s): letter = s[i] if", "s, find the length of the longest substring that contains at most k", "i += chunk_size l_cur = len(cur) if l_cur > l_res: l_res = l_cur", "For example, given s = \"abcba\" and k = 2, the longest substring", "l_res = l_cur res = cur return res if __name__ == '__main__': tests", "\"Failed test {0}\\ngot {1}\" \\ \" expected {2}\".format(test, actual, answer) assert actual ==", "def longest_k(s, k): res = s[:k] l_res = k cur = res letters", "s[:k] l_res = k cur = res letters = set(cur) num_letters = len(letters)", "{0}\\ngot {1}\" \\ \" expected {2}\".format(test, actual, answer) assert actual == answer, message", "len(tmp) cur += chunk_size * letter i += chunk_size l_cur = len(cur) if", "] for test, answer in zip(tests, 
answers): actual = longest_k(*test) message = \"Failed", "[[\"abcba\", 2], [\"contains\", 4], ] answers = [\"bcb\", \"ntain\", ] for test, answer", "= [\"bcb\", \"ntain\", ] for test, answer in zip(tests, answers): actual = longest_k(*test)", "the length of the longest substring that contains at most k distinct characters.", "if l_cur > l_res: l_res = l_cur res = cur return res if", "is \"bcb\". \"\"\" def longest_k(s, k): res = s[:k] l_res = k cur", "[\"contains\", 4], ] answers = [\"bcb\", \"ntain\", ] for test, answer in zip(tests,", "k while i < len(s): letter = s[i] if letter not in letters", "l_cur = len(cur) if l_cur > l_res: l_res = l_cur res = cur", "test, answer in zip(tests, answers): actual = longest_k(*test) message = \"Failed test {0}\\ngot", "if letter not in letters and num_letters >= k: letters.remove(cur[0]) cur = cur.lstrip(cur[0])", "characters is \"bcb\". \"\"\" def longest_k(s, k): res = s[:k] l_res = k", "integer k and a string s, find the length of the longest substring", "string s, find the length of the longest substring that contains at most", "chunk_size * letter i += chunk_size l_cur = len(cur) if l_cur > l_res:", "\"\"\" Given an integer k and a string s, find the length of", "substring with k distinct characters is \"bcb\". 
\"\"\" def longest_k(s, k): res =", "test {0}\\ngot {1}\" \\ \" expected {2}\".format(test, actual, answer) assert actual == answer,", "res = cur return res if __name__ == '__main__': tests = [[\"abcba\", 2],", "num_letters >= k: letters.remove(cur[0]) cur = cur.lstrip(cur[0]) else: num_letters += 1 letters.update(letter) tmp", "given s = \"abcba\" and k = 2, the longest substring with k", "__name__ == '__main__': tests = [[\"abcba\", 2], [\"contains\", 4], ] answers = [\"bcb\",", "= s[i] if letter not in letters and num_letters >= k: letters.remove(cur[0]) cur", "and num_letters >= k: letters.remove(cur[0]) cur = cur.lstrip(cur[0]) else: num_letters += 1 letters.update(letter)", "] answers = [\"bcb\", \"ntain\", ] for test, answer in zip(tests, answers): actual", "find the length of the longest substring that contains at most k distinct", "\"ntain\", ] for test, answer in zip(tests, answers): actual = longest_k(*test) message =", "num_letters = len(letters) s_size = len(s) i = k while i < len(s):", "that contains at most k distinct characters. For example, given s = \"abcba\"", "= cur return res if __name__ == '__main__': tests = [[\"abcba\", 2], [\"contains\",", "length of the longest substring that contains at most k distinct characters. 
For", "= k while i < len(s): letter = s[i] if letter not in", "= s_size - i - len(tmp) cur += chunk_size * letter i +=", "> l_res: l_res = l_cur res = cur return res if __name__ ==", "- i - len(tmp) cur += chunk_size * letter i += chunk_size l_cur", "k: letters.remove(cur[0]) cur = cur.lstrip(cur[0]) else: num_letters += 1 letters.update(letter) tmp = s[i:].lstrip(letter)", "k cur = res letters = set(cur) num_letters = len(letters) s_size = len(s)", "l_res = k cur = res letters = set(cur) num_letters = len(letters) s_size", ">= k: letters.remove(cur[0]) cur = cur.lstrip(cur[0]) else: num_letters += 1 letters.update(letter) tmp =", "chunk_size l_cur = len(cur) if l_cur > l_res: l_res = l_cur res =", "[\"bcb\", \"ntain\", ] for test, answer in zip(tests, answers): actual = longest_k(*test) message", "cur return res if __name__ == '__main__': tests = [[\"abcba\", 2], [\"contains\", 4],", "answers = [\"bcb\", \"ntain\", ] for test, answer in zip(tests, answers): actual =", "letters and num_letters >= k: letters.remove(cur[0]) cur = cur.lstrip(cur[0]) else: num_letters += 1", "letters = set(cur) num_letters = len(letters) s_size = len(s) i = k while", "k and a string s, find the length of the longest substring that", "4], ] answers = [\"bcb\", \"ntain\", ] for test, answer in zip(tests, answers):", "s_size = len(s) i = k while i < len(s): letter = s[i]", "and k = 2, the longest substring with k distinct characters is \"bcb\".", "for test, answer in zip(tests, answers): actual = longest_k(*test) message = \"Failed test", "\"abcba\" and k = 2, the longest substring with k distinct characters is", "longest substring that contains at most k distinct characters. 
For example, given s", "s_size - i - len(tmp) cur += chunk_size * letter i += chunk_size", "= set(cur) num_letters = len(letters) s_size = len(s) i = k while i", "return res if __name__ == '__main__': tests = [[\"abcba\", 2], [\"contains\", 4], ]", "cur.lstrip(cur[0]) else: num_letters += 1 letters.update(letter) tmp = s[i:].lstrip(letter) chunk_size = s_size -", "s = \"abcba\" and k = 2, the longest substring with k distinct", "= s[i:].lstrip(letter) chunk_size = s_size - i - len(tmp) cur += chunk_size *", "2, the longest substring with k distinct characters is \"bcb\". \"\"\" def longest_k(s,", "+= chunk_size l_cur = len(cur) if l_cur > l_res: l_res = l_cur res", "i = k while i < len(s): letter = s[i] if letter not", "substring that contains at most k distinct characters. For example, given s =", "of the longest substring that contains at most k distinct characters. For example,", "tmp = s[i:].lstrip(letter) chunk_size = s_size - i - len(tmp) cur += chunk_size", "< len(s): letter = s[i] if letter not in letters and num_letters >=", "longest_k(s, k): res = s[:k] l_res = k cur = res letters =", "letters.remove(cur[0]) cur = cur.lstrip(cur[0]) else: num_letters += 1 letters.update(letter) tmp = s[i:].lstrip(letter) chunk_size", "= \"Failed test {0}\\ngot {1}\" \\ \" expected {2}\".format(test, actual, answer) assert actual", "len(cur) if l_cur > l_res: l_res = l_cur res = cur return res", "= l_cur res = cur return res if __name__ == '__main__': tests =", "+= chunk_size * letter i += chunk_size l_cur = len(cur) if l_cur >", "at most k distinct characters. 
For example, given s = \"abcba\" and k", "Given an integer k and a string s, find the length of the", "i - len(tmp) cur += chunk_size * letter i += chunk_size l_cur =", "= [[\"abcba\", 2], [\"contains\", 4], ] answers = [\"bcb\", \"ntain\", ] for test,", "* letter i += chunk_size l_cur = len(cur) if l_cur > l_res: l_res", "and a string s, find the length of the longest substring that contains", "with k distinct characters is \"bcb\". \"\"\" def longest_k(s, k): res = s[:k]" ]
[ "[ *requirements, *test_requirements, *setup_requirements, *dev_requirements, *interactive_requirements, ], } setup( author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development", "\"sphinx_rtd_theme>=0.3.1\", \"recommonmark>=0.5.0\", \"twine>=1.13.0\", \"pytest>=4.3.0\", \"pytest-cov==2.6.1\", \"pytest-raises>=0.10\", \"pytest-runner>=4.4\", ] interactive_requirements = [\"altair\", \"jupyterlab\", \"matplotlib\"]", "\"matplotlib\"] with open(\"requirements.txt\", \"r\") as f: requirements = f.read().splitlines() extra_requirements = { \"test\":", "f: requirements = f.read().splitlines() extra_requirements = { \"test\": test_requirements, \"setup\": setup_requirements, \"dev\": dev_requirements,", "requirements = f.read().splitlines() extra_requirements = { \"test\": test_requirements, \"setup\": setup_requirements, \"dev\": dev_requirements, \"interactive\":", ":: Developers\", \"License :: Allen Institute Software License\", \"Natural Language :: English\", \"Programming", "Institute Software License\", long_description=readme + \"\\n\\n\" + history, include_package_data=True, keywords=\"CVAE_testbed\", name=\"CVAE_testbed\", packages=find_packages(), python_requires=\">=3.6\",", "] interactive_requirements = [\"altair\", \"jupyterlab\", \"matplotlib\"] with open(\"requirements.txt\", \"r\") as f: requirements =", "readme = readme_file.read() with open(\"HISTORY.rst\") as history_file: history = history_file.read() test_requirements = [\"codecov\",", "license=\"Allen Institute Software License\", long_description=readme + \"\\n\\n\" + history, include_package_data=True, keywords=\"CVAE_testbed\", name=\"CVAE_testbed\", packages=find_packages(),", "+ \"\\n\\n\" + history, include_package_data=True, keywords=\"CVAE_testbed\", name=\"CVAE_testbed\", packages=find_packages(), python_requires=\">=3.6\", setup_requires=setup_requirements, test_suite=\"CVAE_testbed/CVAE_testbed/tests\", tests_require=test_requirements, extras_require=extra_requirements,", 
"include_package_data=True, keywords=\"CVAE_testbed\", name=\"CVAE_testbed\", packages=find_packages(), python_requires=\">=3.6\", setup_requires=setup_requirements, test_suite=\"CVAE_testbed/CVAE_testbed/tests\", tests_require=test_requirements, extras_require=extra_requirements, url=\"https://github.com/AllenCellModeling/CVAE_testbed\", version=\"0.1.0\", zip_safe=False, )", "\"dev\": dev_requirements, \"interactive\": interactive_requirements, \"all\": [ *requirements, *test_requirements, *setup_requirements, *dev_requirements, *interactive_requirements, ], }", "Software License\", long_description=readme + \"\\n\\n\" + history, include_package_data=True, keywords=\"CVAE_testbed\", name=\"CVAE_testbed\", packages=find_packages(), python_requires=\">=3.6\", setup_requires=setup_requirements,", "], description=\"A research testbed for conditional variational autoencoders\", entry_points={ \"console_scripts\": [\"CVAE_train=CVAE_testbed.bin.CVAE_train:train_model\"] }, install_requires=requirements,", "Python :: 3.7\", ], description=\"A research testbed for conditional variational autoencoders\", entry_points={ \"console_scripts\":", ":: 3.7\", ], description=\"A research testbed for conditional variational autoencoders\", entry_points={ \"console_scripts\": [\"CVAE_train=CVAE_testbed.bin.CVAE_train:train_model\"]", "\"License :: Allen Institute Software License\", \"Natural Language :: English\", \"Programming Language ::", "3.6\", \"Programming Language :: Python :: 3.7\", ], description=\"A research testbed for conditional", "\"r\") as f: requirements = f.read().splitlines() extra_requirements = { \"test\": test_requirements, \"setup\": setup_requirements,", "as history_file: history = history_file.read() test_requirements = [\"codecov\", \"flake8\", \"pytest\", \"pytest-cov\", \"pytest-raises\"] setup_requirements", ":: 3.6\", \"Programming Language :: Python :: 3.7\", ], description=\"A research testbed for", "= { \"test\": test_requirements, \"setup\": 
setup_requirements, \"dev\": dev_requirements, \"interactive\": interactive_requirements, \"all\": [ *requirements,", "\"jupyterlab\", \"matplotlib\"] with open(\"requirements.txt\", \"r\") as f: requirements = f.read().splitlines() extra_requirements = {", "= history_file.read() test_requirements = [\"codecov\", \"flake8\", \"pytest\", \"pytest-cov\", \"pytest-raises\"] setup_requirements = [\"pytest-runner\"] dev_requirements", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\"The setup script.\"\"\" from setuptools import", "history_file.read() test_requirements = [\"codecov\", \"flake8\", \"pytest\", \"pytest-cov\", \"pytest-raises\"] setup_requirements = [\"pytest-runner\"] dev_requirements =", "test_requirements, \"setup\": setup_requirements, \"dev\": dev_requirements, \"interactive\": interactive_requirements, \"all\": [ *requirements, *test_requirements, *setup_requirements, *dev_requirements,", "\"Natural Language :: English\", \"Programming Language :: Python :: 3.6\", \"Programming Language ::", "variational autoencoders\", entry_points={ \"console_scripts\": [\"CVAE_train=CVAE_testbed.bin.CVAE_train:train_model\"] }, install_requires=requirements, license=\"Allen Institute Software License\", long_description=readme +", "= readme_file.read() with open(\"HISTORY.rst\") as history_file: history = history_file.read() test_requirements = [\"codecov\", \"flake8\",", "Status :: 2 - Pre-Alpha\", \"Intended Audience :: Developers\", \"License :: Allen Institute", "Pre-Alpha\", \"Intended Audience :: Developers\", \"License :: Allen Institute Software License\", \"Natural Language", "setup( author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 2 - Pre-Alpha\", \"Intended Audience ::", "\"interactive\": interactive_requirements, \"all\": [ *requirements, *test_requirements, *setup_requirements, *dev_requirements, *interactive_requirements, ], } setup( author=\"<NAME>\",", "test_requirements = [\"codecov\", \"flake8\", \"pytest\", 
\"pytest-cov\", \"pytest-raises\"] setup_requirements = [\"pytest-runner\"] dev_requirements = [", "\"\\n\\n\" + history, include_package_data=True, keywords=\"CVAE_testbed\", name=\"CVAE_testbed\", packages=find_packages(), python_requires=\">=3.6\", setup_requires=setup_requirements, test_suite=\"CVAE_testbed/CVAE_testbed/tests\", tests_require=test_requirements, extras_require=extra_requirements, url=\"https://github.com/AllenCellModeling/CVAE_testbed\",", "install_requires=requirements, license=\"Allen Institute Software License\", long_description=readme + \"\\n\\n\" + history, include_package_data=True, keywords=\"CVAE_testbed\", name=\"CVAE_testbed\",", "= [\"codecov\", \"flake8\", \"pytest\", \"pytest-cov\", \"pytest-raises\"] setup_requirements = [\"pytest-runner\"] dev_requirements = [ \"bumpversion>=0.5.3\",", "[\"pytest-runner\"] dev_requirements = [ \"bumpversion>=0.5.3\", \"wheel>=0.33.1\", \"flake8>=3.7.7\", \"tox>=3.5.2\", \"coverage>=5.0a4\", \"Sphinx>=2.0.0b1\", \"sphinx_rtd_theme>=0.3.1\", \"recommonmark>=0.5.0\", \"twine>=1.13.0\",", "\"coverage>=5.0a4\", \"Sphinx>=2.0.0b1\", \"sphinx_rtd_theme>=0.3.1\", \"recommonmark>=0.5.0\", \"twine>=1.13.0\", \"pytest>=4.3.0\", \"pytest-cov==2.6.1\", \"pytest-raises>=0.10\", \"pytest-runner>=4.4\", ] interactive_requirements = [\"altair\",", "[\"altair\", \"jupyterlab\", \"matplotlib\"] with open(\"requirements.txt\", \"r\") as f: requirements = f.read().splitlines() extra_requirements =", "entry_points={ \"console_scripts\": [\"CVAE_train=CVAE_testbed.bin.CVAE_train:train_model\"] }, install_requires=requirements, license=\"Allen Institute Software License\", long_description=readme + \"\\n\\n\" +", "history_file: history = history_file.read() test_requirements = [\"codecov\", \"flake8\", \"pytest\", \"pytest-cov\", \"pytest-raises\"] setup_requirements =", "long_description=readme + \"\\n\\n\" + history, include_package_data=True, keywords=\"CVAE_testbed\", name=\"CVAE_testbed\", packages=find_packages(), 
python_requires=\">=3.6\", setup_requires=setup_requirements, test_suite=\"CVAE_testbed/CVAE_testbed/tests\", tests_require=test_requirements,", "\"bumpversion>=0.5.3\", \"wheel>=0.33.1\", \"flake8>=3.7.7\", \"tox>=3.5.2\", \"coverage>=5.0a4\", \"Sphinx>=2.0.0b1\", \"sphinx_rtd_theme>=0.3.1\", \"recommonmark>=0.5.0\", \"twine>=1.13.0\", \"pytest>=4.3.0\", \"pytest-cov==2.6.1\", \"pytest-raises>=0.10\", \"pytest-runner>=4.4\",", "\"pytest>=4.3.0\", \"pytest-cov==2.6.1\", \"pytest-raises>=0.10\", \"pytest-runner>=4.4\", ] interactive_requirements = [\"altair\", \"jupyterlab\", \"matplotlib\"] with open(\"requirements.txt\", \"r\")", "find_packages with open(\"README.rst\") as readme_file: readme = readme_file.read() with open(\"HISTORY.rst\") as history_file: history", "python # -*- coding: utf-8 -*- \"\"\"The setup script.\"\"\" from setuptools import setup,", "interactive_requirements = [\"altair\", \"jupyterlab\", \"matplotlib\"] with open(\"requirements.txt\", \"r\") as f: requirements = f.read().splitlines()", "open(\"README.rst\") as readme_file: readme = readme_file.read() with open(\"HISTORY.rst\") as history_file: history = history_file.read()", "- Pre-Alpha\", \"Intended Audience :: Developers\", \"License :: Allen Institute Software License\", \"Natural", ":: Python :: 3.7\", ], description=\"A research testbed for conditional variational autoencoders\", entry_points={", "*dev_requirements, *interactive_requirements, ], } setup( author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 2 -", "= f.read().splitlines() extra_requirements = { \"test\": test_requirements, \"setup\": setup_requirements, \"dev\": dev_requirements, \"interactive\": interactive_requirements,", "\"pytest-runner>=4.4\", ] interactive_requirements = [\"altair\", \"jupyterlab\", \"matplotlib\"] with open(\"requirements.txt\", \"r\") as f: requirements", "*setup_requirements, *dev_requirements, *interactive_requirements, ], } setup( author=\"<NAME>\", 
author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 2", "for conditional variational autoencoders\", entry_points={ \"console_scripts\": [\"CVAE_train=CVAE_testbed.bin.CVAE_train:train_model\"] }, install_requires=requirements, license=\"Allen Institute Software License\",", "history, include_package_data=True, keywords=\"CVAE_testbed\", name=\"CVAE_testbed\", packages=find_packages(), python_requires=\">=3.6\", setup_requires=setup_requirements, test_suite=\"CVAE_testbed/CVAE_testbed/tests\", tests_require=test_requirements, extras_require=extra_requirements, url=\"https://github.com/AllenCellModeling/CVAE_testbed\", version=\"0.1.0\", zip_safe=False,", "autoencoders\", entry_points={ \"console_scripts\": [\"CVAE_train=CVAE_testbed.bin.CVAE_train:train_model\"] }, install_requires=requirements, license=\"Allen Institute Software License\", long_description=readme + \"\\n\\n\"", "description=\"A research testbed for conditional variational autoencoders\", entry_points={ \"console_scripts\": [\"CVAE_train=CVAE_testbed.bin.CVAE_train:train_model\"] }, install_requires=requirements, license=\"Allen", "testbed for conditional variational autoencoders\", entry_points={ \"console_scripts\": [\"CVAE_train=CVAE_testbed.bin.CVAE_train:train_model\"] }, install_requires=requirements, license=\"Allen Institute Software", "[\"codecov\", \"flake8\", \"pytest\", \"pytest-cov\", \"pytest-raises\"] setup_requirements = [\"pytest-runner\"] dev_requirements = [ \"bumpversion>=0.5.3\", \"wheel>=0.33.1\",", "\"Development Status :: 2 - Pre-Alpha\", \"Intended Audience :: Developers\", \"License :: Allen", "\"recommonmark>=0.5.0\", \"twine>=1.13.0\", \"pytest>=4.3.0\", \"pytest-cov==2.6.1\", \"pytest-raises>=0.10\", \"pytest-runner>=4.4\", ] interactive_requirements = [\"altair\", \"jupyterlab\", \"matplotlib\"] with", "script.\"\"\" from setuptools import setup, find_packages with open(\"README.rst\") as readme_file: readme = readme_file.read()", "= [ 
\"bumpversion>=0.5.3\", \"wheel>=0.33.1\", \"flake8>=3.7.7\", \"tox>=3.5.2\", \"coverage>=5.0a4\", \"Sphinx>=2.0.0b1\", \"sphinx_rtd_theme>=0.3.1\", \"recommonmark>=0.5.0\", \"twine>=1.13.0\", \"pytest>=4.3.0\", \"pytest-cov==2.6.1\",", "+ history, include_package_data=True, keywords=\"CVAE_testbed\", name=\"CVAE_testbed\", packages=find_packages(), python_requires=\">=3.6\", setup_requires=setup_requirements, test_suite=\"CVAE_testbed/CVAE_testbed/tests\", tests_require=test_requirements, extras_require=extra_requirements, url=\"https://github.com/AllenCellModeling/CVAE_testbed\", version=\"0.1.0\",", "\"pytest\", \"pytest-cov\", \"pytest-raises\"] setup_requirements = [\"pytest-runner\"] dev_requirements = [ \"bumpversion>=0.5.3\", \"wheel>=0.33.1\", \"flake8>=3.7.7\", \"tox>=3.5.2\",", "\"test\": test_requirements, \"setup\": setup_requirements, \"dev\": dev_requirements, \"interactive\": interactive_requirements, \"all\": [ *requirements, *test_requirements, *setup_requirements,", "English\", \"Programming Language :: Python :: 3.6\", \"Programming Language :: Python :: 3.7\",", "conditional variational autoencoders\", entry_points={ \"console_scripts\": [\"CVAE_train=CVAE_testbed.bin.CVAE_train:train_model\"] }, install_requires=requirements, license=\"Allen Institute Software License\", long_description=readme", "\"Intended Audience :: Developers\", \"License :: Allen Institute Software License\", \"Natural Language ::", "dev_requirements, \"interactive\": interactive_requirements, \"all\": [ *requirements, *test_requirements, *setup_requirements, *dev_requirements, *interactive_requirements, ], } setup(", "with open(\"README.rst\") as readme_file: readme = readme_file.read() with open(\"HISTORY.rst\") as history_file: history =", "\"pytest-cov==2.6.1\", \"pytest-raises>=0.10\", \"pytest-runner>=4.4\", ] interactive_requirements = [\"altair\", \"jupyterlab\", \"matplotlib\"] with open(\"requirements.txt\", \"r\") as", "readme_file: readme = readme_file.read() 
with open(\"HISTORY.rst\") as history_file: history = history_file.read() test_requirements =", "<filename>setup.py #!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\"The setup script.\"\"\" from setuptools", "Institute Software License\", \"Natural Language :: English\", \"Programming Language :: Python :: 3.6\",", "\"flake8\", \"pytest\", \"pytest-cov\", \"pytest-raises\"] setup_requirements = [\"pytest-runner\"] dev_requirements = [ \"bumpversion>=0.5.3\", \"wheel>=0.33.1\", \"flake8>=3.7.7\",", "Developers\", \"License :: Allen Institute Software License\", \"Natural Language :: English\", \"Programming Language", "utf-8 -*- \"\"\"The setup script.\"\"\" from setuptools import setup, find_packages with open(\"README.rst\") as", "author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 2 - Pre-Alpha\", \"Intended Audience :: Developers\",", ":: Python :: 3.6\", \"Programming Language :: Python :: 3.7\", ], description=\"A research", "License\", long_description=readme + \"\\n\\n\" + history, include_package_data=True, keywords=\"CVAE_testbed\", name=\"CVAE_testbed\", packages=find_packages(), python_requires=\">=3.6\", setup_requires=setup_requirements, test_suite=\"CVAE_testbed/CVAE_testbed/tests\",", "setup, find_packages with open(\"README.rst\") as readme_file: readme = readme_file.read() with open(\"HISTORY.rst\") as history_file:", "[ \"bumpversion>=0.5.3\", \"wheel>=0.33.1\", \"flake8>=3.7.7\", \"tox>=3.5.2\", \"coverage>=5.0a4\", \"Sphinx>=2.0.0b1\", \"sphinx_rtd_theme>=0.3.1\", \"recommonmark>=0.5.0\", \"twine>=1.13.0\", \"pytest>=4.3.0\", \"pytest-cov==2.6.1\", \"pytest-raises>=0.10\",", "\"tox>=3.5.2\", \"coverage>=5.0a4\", \"Sphinx>=2.0.0b1\", \"sphinx_rtd_theme>=0.3.1\", \"recommonmark>=0.5.0\", \"twine>=1.13.0\", \"pytest>=4.3.0\", \"pytest-cov==2.6.1\", \"pytest-raises>=0.10\", \"pytest-runner>=4.4\", ] interactive_requirements =", "\"Sphinx>=2.0.0b1\", \"sphinx_rtd_theme>=0.3.1\", \"recommonmark>=0.5.0\", 
\"twine>=1.13.0\", \"pytest>=4.3.0\", \"pytest-cov==2.6.1\", \"pytest-raises>=0.10\", \"pytest-runner>=4.4\", ] interactive_requirements = [\"altair\", \"jupyterlab\",", "*test_requirements, *setup_requirements, *dev_requirements, *interactive_requirements, ], } setup( author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status ::", "[\"CVAE_train=CVAE_testbed.bin.CVAE_train:train_model\"] }, install_requires=requirements, license=\"Allen Institute Software License\", long_description=readme + \"\\n\\n\" + history, include_package_data=True,", "2 - Pre-Alpha\", \"Intended Audience :: Developers\", \"License :: Allen Institute Software License\",", "setup_requirements, \"dev\": dev_requirements, \"interactive\": interactive_requirements, \"all\": [ *requirements, *test_requirements, *setup_requirements, *dev_requirements, *interactive_requirements, ],", "\"console_scripts\": [\"CVAE_train=CVAE_testbed.bin.CVAE_train:train_model\"] }, install_requires=requirements, license=\"Allen Institute Software License\", long_description=readme + \"\\n\\n\" + history,", "*requirements, *test_requirements, *setup_requirements, *dev_requirements, *interactive_requirements, ], } setup( author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status", "history = history_file.read() test_requirements = [\"codecov\", \"flake8\", \"pytest\", \"pytest-cov\", \"pytest-raises\"] setup_requirements = [\"pytest-runner\"]", "coding: utf-8 -*- \"\"\"The setup script.\"\"\" from setuptools import setup, find_packages with open(\"README.rst\")", "-*- coding: utf-8 -*- \"\"\"The setup script.\"\"\" from setuptools import setup, find_packages with", "} setup( author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 2 - Pre-Alpha\", \"Intended Audience", "extra_requirements = { \"test\": test_requirements, \"setup\": setup_requirements, \"dev\": dev_requirements, \"interactive\": interactive_requirements, \"all\": [", 
"\"flake8>=3.7.7\", \"tox>=3.5.2\", \"coverage>=5.0a4\", \"Sphinx>=2.0.0b1\", \"sphinx_rtd_theme>=0.3.1\", \"recommonmark>=0.5.0\", \"twine>=1.13.0\", \"pytest>=4.3.0\", \"pytest-cov==2.6.1\", \"pytest-raises>=0.10\", \"pytest-runner>=4.4\", ] interactive_requirements", "from setuptools import setup, find_packages with open(\"README.rst\") as readme_file: readme = readme_file.read() with", "\"pytest-raises\"] setup_requirements = [\"pytest-runner\"] dev_requirements = [ \"bumpversion>=0.5.3\", \"wheel>=0.33.1\", \"flake8>=3.7.7\", \"tox>=3.5.2\", \"coverage>=5.0a4\", \"Sphinx>=2.0.0b1\",", ":: Allen Institute Software License\", \"Natural Language :: English\", \"Programming Language :: Python", "\"Programming Language :: Python :: 3.6\", \"Programming Language :: Python :: 3.7\", ],", "as f: requirements = f.read().splitlines() extra_requirements = { \"test\": test_requirements, \"setup\": setup_requirements, \"dev\":", "# -*- coding: utf-8 -*- \"\"\"The setup script.\"\"\" from setuptools import setup, find_packages", "setup script.\"\"\" from setuptools import setup, find_packages with open(\"README.rst\") as readme_file: readme =", ":: English\", \"Programming Language :: Python :: 3.6\", \"Programming Language :: Python ::", "setup_requirements = [\"pytest-runner\"] dev_requirements = [ \"bumpversion>=0.5.3\", \"wheel>=0.33.1\", \"flake8>=3.7.7\", \"tox>=3.5.2\", \"coverage>=5.0a4\", \"Sphinx>=2.0.0b1\", \"sphinx_rtd_theme>=0.3.1\",", "import setup, find_packages with open(\"README.rst\") as readme_file: readme = readme_file.read() with open(\"HISTORY.rst\") as", "classifiers=[ \"Development Status :: 2 - Pre-Alpha\", \"Intended Audience :: Developers\", \"License ::", "author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 2 - Pre-Alpha\", \"Intended Audience :: Developers\", \"License", "research testbed for conditional variational autoencoders\", entry_points={ \"console_scripts\": [\"CVAE_train=CVAE_testbed.bin.CVAE_train:train_model\"] }, 
install_requires=requirements, license=\"Allen Institute", "f.read().splitlines() extra_requirements = { \"test\": test_requirements, \"setup\": setup_requirements, \"dev\": dev_requirements, \"interactive\": interactive_requirements, \"all\":", "Language :: Python :: 3.6\", \"Programming Language :: Python :: 3.7\", ], description=\"A", "dev_requirements = [ \"bumpversion>=0.5.3\", \"wheel>=0.33.1\", \"flake8>=3.7.7\", \"tox>=3.5.2\", \"coverage>=5.0a4\", \"Sphinx>=2.0.0b1\", \"sphinx_rtd_theme>=0.3.1\", \"recommonmark>=0.5.0\", \"twine>=1.13.0\", \"pytest>=4.3.0\",", "as readme_file: readme = readme_file.read() with open(\"HISTORY.rst\") as history_file: history = history_file.read() test_requirements", "], } setup( author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 2 - Pre-Alpha\", \"Intended", "*interactive_requirements, ], } setup( author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[ \"Development Status :: 2 - Pre-Alpha\",", "Python :: 3.6\", \"Programming Language :: Python :: 3.7\", ], description=\"A research testbed", "readme_file.read() with open(\"HISTORY.rst\") as history_file: history = history_file.read() test_requirements = [\"codecov\", \"flake8\", \"pytest\",", "with open(\"HISTORY.rst\") as history_file: history = history_file.read() test_requirements = [\"codecov\", \"flake8\", \"pytest\", \"pytest-cov\",", "= [\"pytest-runner\"] dev_requirements = [ \"bumpversion>=0.5.3\", \"wheel>=0.33.1\", \"flake8>=3.7.7\", \"tox>=3.5.2\", \"coverage>=5.0a4\", \"Sphinx>=2.0.0b1\", \"sphinx_rtd_theme>=0.3.1\", \"recommonmark>=0.5.0\",", "open(\"HISTORY.rst\") as history_file: history = history_file.read() test_requirements = [\"codecov\", \"flake8\", \"pytest\", \"pytest-cov\", \"pytest-raises\"]", "\"twine>=1.13.0\", \"pytest>=4.3.0\", \"pytest-cov==2.6.1\", \"pytest-raises>=0.10\", \"pytest-runner>=4.4\", ] interactive_requirements = [\"altair\", \"jupyterlab\", \"matplotlib\"] with open(\"requirements.txt\",", 
"\"\"\"The setup script.\"\"\" from setuptools import setup, find_packages with open(\"README.rst\") as readme_file: readme", "= [\"altair\", \"jupyterlab\", \"matplotlib\"] with open(\"requirements.txt\", \"r\") as f: requirements = f.read().splitlines() extra_requirements", "Language :: Python :: 3.7\", ], description=\"A research testbed for conditional variational autoencoders\",", "-*- \"\"\"The setup script.\"\"\" from setuptools import setup, find_packages with open(\"README.rst\") as readme_file:", "\"setup\": setup_requirements, \"dev\": dev_requirements, \"interactive\": interactive_requirements, \"all\": [ *requirements, *test_requirements, *setup_requirements, *dev_requirements, *interactive_requirements,", "3.7\", ], description=\"A research testbed for conditional variational autoencoders\", entry_points={ \"console_scripts\": [\"CVAE_train=CVAE_testbed.bin.CVAE_train:train_model\"] },", "with open(\"requirements.txt\", \"r\") as f: requirements = f.read().splitlines() extra_requirements = { \"test\": test_requirements,", "License\", \"Natural Language :: English\", \"Programming Language :: Python :: 3.6\", \"Programming Language", "\"pytest-raises>=0.10\", \"pytest-runner>=4.4\", ] interactive_requirements = [\"altair\", \"jupyterlab\", \"matplotlib\"] with open(\"requirements.txt\", \"r\") as f:", "\"all\": [ *requirements, *test_requirements, *setup_requirements, *dev_requirements, *interactive_requirements, ], } setup( author=\"<NAME>\", author_email=\"<EMAIL>\", classifiers=[", "setuptools import setup, find_packages with open(\"README.rst\") as readme_file: readme = readme_file.read() with open(\"HISTORY.rst\")", "interactive_requirements, \"all\": [ *requirements, *test_requirements, *setup_requirements, *dev_requirements, *interactive_requirements, ], } setup( author=\"<NAME>\", author_email=\"<EMAIL>\",", ":: 2 - Pre-Alpha\", \"Intended Audience :: Developers\", \"License :: Allen Institute Software", "\"wheel>=0.33.1\", \"flake8>=3.7.7\", 
\"tox>=3.5.2\", \"coverage>=5.0a4\", \"Sphinx>=2.0.0b1\", \"sphinx_rtd_theme>=0.3.1\", \"recommonmark>=0.5.0\", \"twine>=1.13.0\", \"pytest>=4.3.0\", \"pytest-cov==2.6.1\", \"pytest-raises>=0.10\", \"pytest-runner>=4.4\", ]", "Language :: English\", \"Programming Language :: Python :: 3.6\", \"Programming Language :: Python", "\"pytest-cov\", \"pytest-raises\"] setup_requirements = [\"pytest-runner\"] dev_requirements = [ \"bumpversion>=0.5.3\", \"wheel>=0.33.1\", \"flake8>=3.7.7\", \"tox>=3.5.2\", \"coverage>=5.0a4\",", "Allen Institute Software License\", \"Natural Language :: English\", \"Programming Language :: Python ::", "{ \"test\": test_requirements, \"setup\": setup_requirements, \"dev\": dev_requirements, \"interactive\": interactive_requirements, \"all\": [ *requirements, *test_requirements,", "Software License\", \"Natural Language :: English\", \"Programming Language :: Python :: 3.6\", \"Programming", "}, install_requires=requirements, license=\"Allen Institute Software License\", long_description=readme + \"\\n\\n\" + history, include_package_data=True, keywords=\"CVAE_testbed\",", "\"Programming Language :: Python :: 3.7\", ], description=\"A research testbed for conditional variational", "Audience :: Developers\", \"License :: Allen Institute Software License\", \"Natural Language :: English\",", "open(\"requirements.txt\", \"r\") as f: requirements = f.read().splitlines() extra_requirements = { \"test\": test_requirements, \"setup\":" ]
[ "s, x = input().split(' ') st = [] [st.append(int(x)) for x in input().split('", "<gh_stars>0 n, s, x = input().split(' ') st = [] [st.append(int(x)) for x", "input().split(' ')] for j in range(int(s)): st.pop() if int(x) in st: print('True') else:", "= [] [st.append(int(x)) for x in input().split(' ')] for j in range(int(s)): st.pop()", "st = [] [st.append(int(x)) for x in input().split(' ')] for j in range(int(s)):", "x in input().split(' ')] for j in range(int(s)): st.pop() if int(x) in st:", "[] [st.append(int(x)) for x in input().split(' ')] for j in range(int(s)): st.pop() if", "in range(int(s)): st.pop() if int(x) in st: print('True') else: st = sorted(st) if", "n, s, x = input().split(' ') st = [] [st.append(int(x)) for x in", "x = input().split(' ') st = [] [st.append(int(x)) for x in input().split(' ')]", "for j in range(int(s)): st.pop() if int(x) in st: print('True') else: st =", "int(x) in st: print('True') else: st = sorted(st) if st: print(st[0]) else: print('0')", "')] for j in range(int(s)): st.pop() if int(x) in st: print('True') else: st", "j in range(int(s)): st.pop() if int(x) in st: print('True') else: st = sorted(st)", "[st.append(int(x)) for x in input().split(' ')] for j in range(int(s)): st.pop() if int(x)", "= input().split(' ') st = [] [st.append(int(x)) for x in input().split(' ')] for", "') st = [] [st.append(int(x)) for x in input().split(' ')] for j in", "range(int(s)): st.pop() if int(x) in st: print('True') else: st = sorted(st) if st:", "for x in input().split(' ')] for j in range(int(s)): st.pop() if int(x) in", "st.pop() if int(x) in st: print('True') else: st = sorted(st) if st: print(st[0])", "in input().split(' ')] for j in range(int(s)): st.pop() if int(x) in st: print('True')", "input().split(' ') st = [] [st.append(int(x)) for x in input().split(' ')] for j", "if int(x) in st: print('True') else: st = sorted(st) if st: print(st[0]) else:" ]
[ "progressbar.FormatCustomText( 'Spam: %(spam).1f kg, eggs: %(eggs)d', dict( spam=0.25, eggs=3, ), ) bar =", "return 'Bigger Now ' + progressbar.FileTransferSpeed.update(self, pbar) else: return progressbar.FileTransferSpeed.update(self, pbar) def test_crazy_file_transfer_speed_widget():", "', progressbar.Percentage(), ' ', progressbar.ETA(), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000) # maybe do", "in range(0, 200, 5): # do something time.sleep(0.1) p.update(i + 1) p.finish() def", "error=1) i += 1 p.update(i, text=None) i += 1 p.update(i, text=False) i +=", "= [ # CrazyFileTransferSpeed(), ' <<<', progressbar.Bar(), '>>> ', progressbar.Percentage(), ' ', progressbar.ETA(),", "max_value=1000, variables=dict(predefined='predefined')) p.start() print('time', time, time.sleep) for i in range(0, 200, 5): time.sleep(0.1)", "= [ ' [', progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(), ') ',", "' <<<', progressbar.Bar(), '>>> ', progressbar.Percentage(), ' ', progressbar.ETA(), ] p = progressbar.ProgressBar(widgets=widgets,", "p = progressbar.ProgressBar(widgets=widgets, max_value=1000) # maybe do something p.start() for i in range(0,", "p.finish() def test_variable_widget_widget(): widgets = [ ' [', progressbar.Timer(), '] ', progressbar.Bar(), '", "CrazyFileTransferSpeed(progressbar.FileTransferSpeed): \"It's bigger between 45 and 80 percent\" def update(self, pbar): if 45", "class CrazyFileTransferSpeed(progressbar.FileTransferSpeed): \"It's bigger between 45 and 80 percent\" def update(self, pbar): if", "' + progressbar.FileTransferSpeed.update(self, pbar) else: return progressbar.FileTransferSpeed.update(self, pbar) def test_crazy_file_transfer_speed_widget(): widgets = [", "something p.start() for i in range(0, 200, 5): # do something time.sleep(0.1) p.update(i", "text=None) i += 1 p.update(i, text=False) i += 1 p.update(i, text=True, error='a') p.finish()", "p.update(i, text=True, error='a') p.finish() def 
test_format_custom_text_widget(): widget = progressbar.FormatCustomText( 'Spam: %(spam).1f kg, eggs:", "progressbar.FileTransferSpeed.update(self, pbar) def test_crazy_file_transfer_speed_widget(): widgets = [ # CrazyFileTransferSpeed(), ' <<<', progressbar.Bar(), '>>>", "update(self, pbar): if 45 < pbar.percentage() < 80: return 'Bigger Now ' +", "return progressbar.FileTransferSpeed.update(self, pbar) def test_crazy_file_transfer_speed_widget(): widgets = [ # CrazyFileTransferSpeed(), ' <<<', progressbar.Bar(),", "5): # do something time.sleep(0.1) p.update(i + 1) p.finish() def test_variable_widget_widget(): widgets =", "time, time.sleep) for i in range(0, 200, 5): time.sleep(0.1) p.update(i + 1, loss=.5,", "[ # CrazyFileTransferSpeed(), ' <<<', progressbar.Bar(), '>>> ', progressbar.Percentage(), ' ', progressbar.ETA(), ]", "text='spam', error=1) i += 1 p.update(i, text=None) i += 1 p.update(i, text=False) i", "= progressbar.ProgressBar(widgets=widgets, max_value=1000, variables=dict(predefined='predefined')) p.start() print('time', time, time.sleep) for i in range(0, 200,", "+= 1 p.update(i, text=False) i += 1 p.update(i, text=True, error='a') p.finish() def test_format_custom_text_widget():", "\"It's bigger between 45 and 80 percent\" def update(self, pbar): if 45 <", "variables=dict(predefined='predefined')) p.start() print('time', time, time.sleep) for i in range(0, 200, 5): time.sleep(0.1) p.update(i", "test_format_custom_text_widget(): widget = progressbar.FormatCustomText( 'Spam: %(spam).1f kg, eggs: %(eggs)d', dict( spam=0.25, eggs=3, ),", "error='a') p.finish() def test_format_custom_text_widget(): widget = progressbar.FormatCustomText( 'Spam: %(spam).1f kg, eggs: %(eggs)d', dict(", "progressbar.Bar(), ' (', progressbar.ETA(), ') ', progressbar.Variable('loss'), progressbar.Variable('text'), progressbar.Variable('error', precision=None), progressbar.Variable('missing'), progressbar.Variable('predefined'), ]", "text=True, error='a') p.finish() def 
test_format_custom_text_widget(): widget = progressbar.FormatCustomText( 'Spam: %(spam).1f kg, eggs: %(eggs)d',", "') ', progressbar.Variable('loss'), progressbar.Variable('text'), progressbar.Variable('error', precision=None), progressbar.Variable('missing'), progressbar.Variable('predefined'), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000,", "', progressbar.Bar(), ' (', progressbar.ETA(), ') ', progressbar.Variable('loss'), progressbar.Variable('text'), progressbar.Variable('error', precision=None), progressbar.Variable('missing'), progressbar.Variable('predefined'),", "for i in range(0, 200, 5): # do something time.sleep(0.1) p.update(i + 1)", "def update(self, pbar): if 45 < pbar.percentage() < 80: return 'Bigger Now '", "= progressbar.ProgressBar(widgets=widgets, max_value=1000) # maybe do something p.start() for i in range(0, 200,", "', progressbar.ETA(), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000) # maybe do something p.start() for", "CrazyFileTransferSpeed(), ' <<<', progressbar.Bar(), '>>> ', progressbar.Percentage(), ' ', progressbar.ETA(), ] p =", "progressbar.Variable('text'), progressbar.Variable('error', precision=None), progressbar.Variable('missing'), progressbar.Variable('predefined'), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000, variables=dict(predefined='predefined')) p.start() print('time',", "progressbar.Percentage(), ' ', progressbar.ETA(), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000) # maybe do something", "[ ' [', progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(), ') ', progressbar.Variable('loss'),", "p.start() for i in range(0, 200, 5): # do something time.sleep(0.1) p.update(i +", "p.finish() def test_format_custom_text_widget(): widget = progressbar.FormatCustomText( 'Spam: %(spam).1f kg, eggs: %(eggs)d', dict( spam=0.25,", "widgets = [ # CrazyFileTransferSpeed(), ' <<<', progressbar.Bar(), '>>> ', progressbar.Percentage(), ' ',", "+= 1 p.update(i, 
text=True, error='a') p.finish() def test_format_custom_text_widget(): widget = progressbar.FormatCustomText( 'Spam: %(spam).1f", "45 < pbar.percentage() < 80: return 'Bigger Now ' + progressbar.FileTransferSpeed.update(self, pbar) else:", "eggs: %(eggs)d', dict( spam=0.25, eggs=3, ), ) bar = progressbar.ProgressBar(widgets=[ widget, ]) for", "Now ' + progressbar.FileTransferSpeed.update(self, pbar) else: return progressbar.FileTransferSpeed.update(self, pbar) def test_crazy_file_transfer_speed_widget(): widgets =", "max_value=1000) # maybe do something p.start() for i in range(0, 200, 5): #", "80: return 'Bigger Now ' + progressbar.FileTransferSpeed.update(self, pbar) else: return progressbar.FileTransferSpeed.update(self, pbar) def", "do something p.start() for i in range(0, 200, 5): # do something time.sleep(0.1)", "bigger between 45 and 80 percent\" def update(self, pbar): if 45 < pbar.percentage()", "range(0, 200, 5): time.sleep(0.1) p.update(i + 1, loss=.5, text='spam', error=1) i += 1", "' [', progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(), ') ', progressbar.Variable('loss'), progressbar.Variable('text'),", "dict( spam=0.25, eggs=3, ), ) bar = progressbar.ProgressBar(widgets=[ widget, ]) for i in", "= progressbar.ProgressBar(widgets=[ widget, ]) for i in bar(range(5)): widget.update_mapping(eggs=i * 2) assert widget.mapping['eggs']", "i += 1 p.update(i, text=False) i += 1 p.update(i, text=True, error='a') p.finish() def", "< pbar.percentage() < 80: return 'Bigger Now ' + progressbar.FileTransferSpeed.update(self, pbar) else: return", "'>>> ', progressbar.Percentage(), ' ', progressbar.ETA(), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000) # maybe", ") bar = progressbar.ProgressBar(widgets=[ widget, ]) for i in bar(range(5)): widget.update_mapping(eggs=i * 2)", "<gh_stars>100-1000 import time import progressbar class CrazyFileTransferSpeed(progressbar.FileTransferSpeed): \"It's bigger between 45 and 80", 
"progressbar.FileTransferSpeed.update(self, pbar) else: return progressbar.FileTransferSpeed.update(self, pbar) def test_crazy_file_transfer_speed_widget(): widgets = [ # CrazyFileTransferSpeed(),", "] p = progressbar.ProgressBar(widgets=widgets, max_value=1000, variables=dict(predefined='predefined')) p.start() print('time', time, time.sleep) for i in", "p.update(i, text=None) i += 1 p.update(i, text=False) i += 1 p.update(i, text=True, error='a')", "precision=None), progressbar.Variable('missing'), progressbar.Variable('predefined'), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000, variables=dict(predefined='predefined')) p.start() print('time', time, time.sleep)", "1 p.update(i, text=None) i += 1 p.update(i, text=False) i += 1 p.update(i, text=True,", "+ 1, loss=.5, text='spam', error=1) i += 1 p.update(i, text=None) i += 1", "'Bigger Now ' + progressbar.FileTransferSpeed.update(self, pbar) else: return progressbar.FileTransferSpeed.update(self, pbar) def test_crazy_file_transfer_speed_widget(): widgets", "p.update(i + 1) p.finish() def test_variable_widget_widget(): widgets = [ ' [', progressbar.Timer(), ']", "range(0, 200, 5): # do something time.sleep(0.1) p.update(i + 1) p.finish() def test_variable_widget_widget():", "< 80: return 'Bigger Now ' + progressbar.FileTransferSpeed.update(self, pbar) else: return progressbar.FileTransferSpeed.update(self, pbar)", "percent\" def update(self, pbar): if 45 < pbar.percentage() < 80: return 'Bigger Now", "test_variable_widget_widget(): widgets = [ ' [', progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(),", "time.sleep(0.1) p.update(i + 1) p.finish() def test_variable_widget_widget(): widgets = [ ' [', progressbar.Timer(),", "), ) bar = progressbar.ProgressBar(widgets=[ widget, ]) for i in bar(range(5)): widget.update_mapping(eggs=i *", "import progressbar class CrazyFileTransferSpeed(progressbar.FileTransferSpeed): \"It's bigger between 45 and 80 percent\" def update(self,", "200, 5): 
# do something time.sleep(0.1) p.update(i + 1) p.finish() def test_variable_widget_widget(): widgets", "def test_crazy_file_transfer_speed_widget(): widgets = [ # CrazyFileTransferSpeed(), ' <<<', progressbar.Bar(), '>>> ', progressbar.Percentage(),", "i in range(0, 200, 5): time.sleep(0.1) p.update(i + 1, loss=.5, text='spam', error=1) i", "80 percent\" def update(self, pbar): if 45 < pbar.percentage() < 80: return 'Bigger", "bar = progressbar.ProgressBar(widgets=[ widget, ]) for i in bar(range(5)): widget.update_mapping(eggs=i * 2) assert", "i += 1 p.update(i, text=None) i += 1 p.update(i, text=False) i += 1", "maybe do something p.start() for i in range(0, 200, 5): # do something", "progressbar.ProgressBar(widgets=[ widget, ]) for i in bar(range(5)): widget.update_mapping(eggs=i * 2) assert widget.mapping['eggs'] ==", "pbar.percentage() < 80: return 'Bigger Now ' + progressbar.FileTransferSpeed.update(self, pbar) else: return progressbar.FileTransferSpeed.update(self,", "def test_format_custom_text_widget(): widget = progressbar.FormatCustomText( 'Spam: %(spam).1f kg, eggs: %(eggs)d', dict( spam=0.25, eggs=3,", "'Spam: %(spam).1f kg, eggs: %(eggs)d', dict( spam=0.25, eggs=3, ), ) bar = progressbar.ProgressBar(widgets=[", "%(eggs)d', dict( spam=0.25, eggs=3, ), ) bar = progressbar.ProgressBar(widgets=[ widget, ]) for i", "in range(0, 200, 5): time.sleep(0.1) p.update(i + 1, loss=.5, text='spam', error=1) i +=", "%(spam).1f kg, eggs: %(eggs)d', dict( spam=0.25, eggs=3, ), ) bar = progressbar.ProgressBar(widgets=[ widget,", "1) p.finish() def test_variable_widget_widget(): widgets = [ ' [', progressbar.Timer(), '] ', progressbar.Bar(),", "1 p.update(i, text=False) i += 1 p.update(i, text=True, error='a') p.finish() def test_format_custom_text_widget(): widget", "eggs=3, ), ) bar = progressbar.ProgressBar(widgets=[ widget, ]) for i in bar(range(5)): widget.update_mapping(eggs=i", "spam=0.25, eggs=3, ), ) bar = progressbar.ProgressBar(widgets=[ widget, ]) for i in 
bar(range(5)):", "# CrazyFileTransferSpeed(), ' <<<', progressbar.Bar(), '>>> ', progressbar.Percentage(), ' ', progressbar.ETA(), ] p", "p.update(i + 1, loss=.5, text='spam', error=1) i += 1 p.update(i, text=None) i +=", "something time.sleep(0.1) p.update(i + 1) p.finish() def test_variable_widget_widget(): widgets = [ ' [',", "between 45 and 80 percent\" def update(self, pbar): if 45 < pbar.percentage() <", "and 80 percent\" def update(self, pbar): if 45 < pbar.percentage() < 80: return", "import time import progressbar class CrazyFileTransferSpeed(progressbar.FileTransferSpeed): \"It's bigger between 45 and 80 percent\"", "def test_variable_widget_widget(): widgets = [ ' [', progressbar.Timer(), '] ', progressbar.Bar(), ' (',", "for i in range(0, 200, 5): time.sleep(0.1) p.update(i + 1, loss=.5, text='spam', error=1)", "p.update(i, text=False) i += 1 p.update(i, text=True, error='a') p.finish() def test_format_custom_text_widget(): widget =", "if 45 < pbar.percentage() < 80: return 'Bigger Now ' + progressbar.FileTransferSpeed.update(self, pbar)", "p = progressbar.ProgressBar(widgets=widgets, max_value=1000, variables=dict(predefined='predefined')) p.start() print('time', time, time.sleep) for i in range(0,", "+ 1) p.finish() def test_variable_widget_widget(): widgets = [ ' [', progressbar.Timer(), '] ',", "+= 1 p.update(i, text=None) i += 1 p.update(i, text=False) i += 1 p.update(i,", "i += 1 p.update(i, text=True, error='a') p.finish() def test_format_custom_text_widget(): widget = progressbar.FormatCustomText( 'Spam:", "progressbar.Bar(), '>>> ', progressbar.Percentage(), ' ', progressbar.ETA(), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000) #", "progressbar class CrazyFileTransferSpeed(progressbar.FileTransferSpeed): \"It's bigger between 45 and 80 percent\" def update(self, pbar):", "<<<', progressbar.Bar(), '>>> ', progressbar.Percentage(), ' ', progressbar.ETA(), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000)", "kg, eggs: 
%(eggs)d', dict( spam=0.25, eggs=3, ), ) bar = progressbar.ProgressBar(widgets=[ widget, ])", "progressbar.Variable('predefined'), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000, variables=dict(predefined='predefined')) p.start() print('time', time, time.sleep) for i", "progressbar.ProgressBar(widgets=widgets, max_value=1000, variables=dict(predefined='predefined')) p.start() print('time', time, time.sleep) for i in range(0, 200, 5):", "widgets = [ ' [', progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(), ')", "45 and 80 percent\" def update(self, pbar): if 45 < pbar.percentage() < 80:", "time.sleep(0.1) p.update(i + 1, loss=.5, text='spam', error=1) i += 1 p.update(i, text=None) i", "+ progressbar.FileTransferSpeed.update(self, pbar) else: return progressbar.FileTransferSpeed.update(self, pbar) def test_crazy_file_transfer_speed_widget(): widgets = [ #", "progressbar.ETA(), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000) # maybe do something p.start() for i", "progressbar.Variable('loss'), progressbar.Variable('text'), progressbar.Variable('error', precision=None), progressbar.Variable('missing'), progressbar.Variable('predefined'), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000, variables=dict(predefined='predefined')) p.start()", "'] ', progressbar.Bar(), ' (', progressbar.ETA(), ') ', progressbar.Variable('loss'), progressbar.Variable('text'), progressbar.Variable('error', precision=None), progressbar.Variable('missing'),", "5): time.sleep(0.1) p.update(i + 1, loss=.5, text='spam', error=1) i += 1 p.update(i, text=None)", "[', progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(), ') ', progressbar.Variable('loss'), progressbar.Variable('text'), progressbar.Variable('error',", "pbar) def test_crazy_file_transfer_speed_widget(): widgets = [ # CrazyFileTransferSpeed(), ' <<<', progressbar.Bar(), '>>> ',", "progressbar.ETA(), ') ', progressbar.Variable('loss'), 
progressbar.Variable('text'), progressbar.Variable('error', precision=None), progressbar.Variable('missing'), progressbar.Variable('predefined'), ] p = progressbar.ProgressBar(widgets=widgets,", "time.sleep) for i in range(0, 200, 5): time.sleep(0.1) p.update(i + 1, loss=.5, text='spam',", "test_crazy_file_transfer_speed_widget(): widgets = [ # CrazyFileTransferSpeed(), ' <<<', progressbar.Bar(), '>>> ', progressbar.Percentage(), '", "', progressbar.Variable('loss'), progressbar.Variable('text'), progressbar.Variable('error', precision=None), progressbar.Variable('missing'), progressbar.Variable('predefined'), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000, variables=dict(predefined='predefined'))", "1 p.update(i, text=True, error='a') p.finish() def test_format_custom_text_widget(): widget = progressbar.FormatCustomText( 'Spam: %(spam).1f kg,", "progressbar.Variable('missing'), progressbar.Variable('predefined'), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000, variables=dict(predefined='predefined')) p.start() print('time', time, time.sleep) for", "progressbar.ProgressBar(widgets=widgets, max_value=1000) # maybe do something p.start() for i in range(0, 200, 5):", "# do something time.sleep(0.1) p.update(i + 1) p.finish() def test_variable_widget_widget(): widgets = [", "print('time', time, time.sleep) for i in range(0, 200, 5): time.sleep(0.1) p.update(i + 1,", "else: return progressbar.FileTransferSpeed.update(self, pbar) def test_crazy_file_transfer_speed_widget(): widgets = [ # CrazyFileTransferSpeed(), ' <<<',", "] p = progressbar.ProgressBar(widgets=widgets, max_value=1000) # maybe do something p.start() for i in", "widget, ]) for i in bar(range(5)): widget.update_mapping(eggs=i * 2) assert widget.mapping['eggs'] == bar.widgets[0].mapping['eggs']", "pbar): if 45 < pbar.percentage() < 80: return 'Bigger Now ' + progressbar.FileTransferSpeed.update(self,", "loss=.5, text='spam', error=1) i += 1 p.update(i, text=None) i += 1 
p.update(i, text=False)", "' (', progressbar.ETA(), ') ', progressbar.Variable('loss'), progressbar.Variable('text'), progressbar.Variable('error', precision=None), progressbar.Variable('missing'), progressbar.Variable('predefined'), ] p", "= progressbar.FormatCustomText( 'Spam: %(spam).1f kg, eggs: %(eggs)d', dict( spam=0.25, eggs=3, ), ) bar", "# maybe do something p.start() for i in range(0, 200, 5): # do", "(', progressbar.ETA(), ') ', progressbar.Variable('loss'), progressbar.Variable('text'), progressbar.Variable('error', precision=None), progressbar.Variable('missing'), progressbar.Variable('predefined'), ] p =", "' ', progressbar.ETA(), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000) # maybe do something p.start()", "progressbar.Timer(), '] ', progressbar.Bar(), ' (', progressbar.ETA(), ') ', progressbar.Variable('loss'), progressbar.Variable('text'), progressbar.Variable('error', precision=None),", "time import progressbar class CrazyFileTransferSpeed(progressbar.FileTransferSpeed): \"It's bigger between 45 and 80 percent\" def", "i in range(0, 200, 5): # do something time.sleep(0.1) p.update(i + 1) p.finish()", "progressbar.Variable('error', precision=None), progressbar.Variable('missing'), progressbar.Variable('predefined'), ] p = progressbar.ProgressBar(widgets=widgets, max_value=1000, variables=dict(predefined='predefined')) p.start() print('time', time,", "do something time.sleep(0.1) p.update(i + 1) p.finish() def test_variable_widget_widget(): widgets = [ '", "p.start() print('time', time, time.sleep) for i in range(0, 200, 5): time.sleep(0.1) p.update(i +", "widget = progressbar.FormatCustomText( 'Spam: %(spam).1f kg, eggs: %(eggs)d', dict( spam=0.25, eggs=3, ), )", "text=False) i += 1 p.update(i, text=True, error='a') p.finish() def test_format_custom_text_widget(): widget = progressbar.FormatCustomText(", "1, loss=.5, text='spam', error=1) i += 1 p.update(i, text=None) i += 1 p.update(i,", "200, 5): time.sleep(0.1) p.update(i + 1, 
loss=.5, text='spam', error=1) i += 1 p.update(i,", "pbar) else: return progressbar.FileTransferSpeed.update(self, pbar) def test_crazy_file_transfer_speed_widget(): widgets = [ # CrazyFileTransferSpeed(), '" ]
[ "NUBIA_OPTIONS, TITLE_ASCII, TITLE_TEXT class TurdshovelContext(context.Context): \"\"\"Context for the Turdshovel app. Only allows interactive", "import box, inspect from rich.align import Align from rich.console import Console from rich.panel", "for the context\"\"\" self._replace_internal_cmds(override=True) for cmd in cmdloader.load_commands(COMMAND_PACKAGES): self._registry.register_command( AutoCommand(cmd, NUBIA_OPTIONS), override=True )", "self.available_obj_types = SortedSet() title_panel = Panel.fit( Text(TITLE_ASCII.rjust(33), style=\"bold #52311A\", end=\"\").append( TITLE_TEXT, style=\"bold #693F21\"", "Text from sortedcontainers import SortedSet from ._nubia import _Exit, _Help from .constants import", "import os from copy import copy from typing import Any, List, Tuple from", "SortedSet() def get_prompt_tokens(self) -> List[Tuple[Any, str]]: tokens = [ (Token.NewLine, \"\\n\"), (Token.Title, \"Turdshovel\"),", "# Need to set this to allow initialization available_obj_types = SortedSet() def get_prompt_tokens(self)", "None self.target_friendly_name = \"\" self.runtime = None self.available_obj_types = SortedSet() title_panel = Panel.fit(", "args): self.verbose = args.verbose self.console = Console(soft_wrap=True) self.console.set_alt_screen() # This will be whatever", "Console from rich.panel import Panel from rich.text import Text from sortedcontainers import SortedSet", "_replace_internal_cmds(self, override: bool): for k, v in copy(self._registry._cmd_instance_map).items(): if v.__module__.startswith(\"nubia.internal.commands\"): self._registry._cmd_instance_map.pop(k) self._registry._completer.meta_dict.pop(k) self._registry._completer.words.remove(k)", "box, inspect from rich.align import Align from rich.console import Console from rich.panel import", "DataTarget is connected to and the related runtime self.target = None self.target_friendly_name =", "TITLE_TEXT, style=\"bold #693F21\" ), border_style=\"bold #52311A\", subtitle=f\"{':poop:' * 
36}\", box=box.SIMPLE, ) self.console.print(Align.center(title_panel)) self._replace_internal_cmds(override=False)", "_Help]: self._registry.register_command(cmd(), override) def reload_commands(self): \"\"\"Reloads all the commands for the context\"\"\" self._replace_internal_cmds(override=True)", "tokens def _replace_internal_cmds(self, override: bool): for k, v in copy(self._registry._cmd_instance_map).items(): if v.__module__.startswith(\"nubia.internal.commands\"): self._registry._cmd_instance_map.pop(k)", "end=\"\").append( TITLE_TEXT, style=\"bold #693F21\" ), border_style=\"bold #52311A\", subtitle=f\"{':poop:' * 36}\", box=box.SIMPLE, ) self.console.print(Align.center(title_panel))", "context, eventbus from nubia.internal import cmdloader from nubia.internal.cmdbase import AutoCommand from pygments.token import", "available_obj_types = SortedSet() def get_prompt_tokens(self) -> List[Tuple[Any, str]]: tokens = [ (Token.NewLine, \"\\n\"),", "(Token.Title, \"Turdshovel\"), (Token.Space, \"\"), (Token.Pound, \"> \"), ] if self.target_friendly_name: tokens.insert(3, (Name.Command, self.target_friendly_name))", "self.runtime = None self.available_obj_types = SortedSet() title_panel = Panel.fit( Text(TITLE_ASCII.rjust(33), style=\"bold #52311A\", end=\"\").append(", "from rich.align import Align from rich.console import Console from rich.panel import Panel from", "from copy import copy from typing import Any, List, Tuple from nubia import", "class TurdshovelContext(context.Context): \"\"\"Context for the Turdshovel app. 
Only allows interactive mode\"\"\" # Need", "rich.console import Console from rich.panel import Panel from rich.text import Text from sortedcontainers", "def _replace_internal_cmds(self, override: bool): for k, v in copy(self._registry._cmd_instance_map).items(): if v.__module__.startswith(\"nubia.internal.commands\"): self._registry._cmd_instance_map.pop(k) self._registry._completer.meta_dict.pop(k)", "_Help from .constants import COMMAND_PACKAGES, NUBIA_OPTIONS, TITLE_ASCII, TITLE_TEXT class TurdshovelContext(context.Context): \"\"\"Context for the", "Tuple from nubia import context, eventbus from nubia.internal import cmdloader from nubia.internal.cmdbase import", "def get_prompt_tokens(self) -> List[Tuple[Any, str]]: tokens = [ (Token.NewLine, \"\\n\"), (Token.Title, \"Turdshovel\"), (Token.Space,", "override) def reload_commands(self): \"\"\"Reloads all the commands for the context\"\"\" self._replace_internal_cmds(override=True) for cmd", "is connected to and the related runtime self.target = None self.target_friendly_name = \"\"", "to and the related runtime self.target = None self.target_friendly_name = \"\" self.runtime =", "Token from rich import box, inspect from rich.align import Align from rich.console import", "None self.available_obj_types = SortedSet() title_panel = Panel.fit( Text(TITLE_ASCII.rjust(33), style=\"bold #52311A\", end=\"\").append( TITLE_TEXT, style=\"bold", "\"> \"), ] if self.target_friendly_name: tokens.insert(3, (Name.Command, self.target_friendly_name)) tokens.insert(3, (Token.At, \"@\")) return tokens", "def reload_commands(self): \"\"\"Reloads all the commands for the context\"\"\" self._replace_internal_cmds(override=True) for cmd in", "on_interactive(self, args): self.verbose = args.verbose self.console = Console(soft_wrap=True) self.console.set_alt_screen() # This will be", ".constants import COMMAND_PACKAGES, NUBIA_OPTIONS, TITLE_ASCII, TITLE_TEXT class TurdshovelContext(context.Context): \"\"\"Context for the Turdshovel 
app.", "from nubia.internal import cmdloader from nubia.internal.cmdbase import AutoCommand from pygments.token import Name, Token", "import SortedSet from ._nubia import _Exit, _Help from .constants import COMMAND_PACKAGES, NUBIA_OPTIONS, TITLE_ASCII,", "SortedSet from ._nubia import _Exit, _Help from .constants import COMMAND_PACKAGES, NUBIA_OPTIONS, TITLE_ASCII, TITLE_TEXT", "exit and help with less aliases for cmd in [_Exit, _Help]: self._registry.register_command(cmd(), override)", "from nubia import context, eventbus from nubia.internal import cmdloader from nubia.internal.cmdbase import AutoCommand", "style=\"bold #52311A\", end=\"\").append( TITLE_TEXT, style=\"bold #693F21\" ), border_style=\"bold #52311A\", subtitle=f\"{':poop:' * 36}\", box=box.SIMPLE,", "for exit and help with less aliases for cmd in [_Exit, _Help]: self._registry.register_command(cmd(),", "\"\"), (Token.Pound, \"> \"), ] if self.target_friendly_name: tokens.insert(3, (Name.Command, self.target_friendly_name)) tokens.insert(3, (Token.At, \"@\"))", "override: bool): for k, v in copy(self._registry._cmd_instance_map).items(): if v.__module__.startswith(\"nubia.internal.commands\"): self._registry._cmd_instance_map.pop(k) self._registry._completer.meta_dict.pop(k) self._registry._completer.words.remove(k) #", "from rich.text import Text from sortedcontainers import SortedSet from ._nubia import _Exit, _Help", "import copy from typing import Any, List, Tuple from nubia import context, eventbus", "This will be whatever the DataTarget is connected to and the related runtime", "pygments.token import Name, Token from rich import box, inspect from rich.align import Align", "copy(self._registry._cmd_instance_map).items(): if v.__module__.startswith(\"nubia.internal.commands\"): self._registry._cmd_instance_map.pop(k) self._registry._completer.meta_dict.pop(k) self._registry._completer.words.remove(k) # Readd commands for exit and help", "rich.text import Text from sortedcontainers import 
SortedSet from ._nubia import _Exit, _Help from", "allows interactive mode\"\"\" # Need to set this to allow initialization available_obj_types =", "import _Exit, _Help from .constants import COMMAND_PACKAGES, NUBIA_OPTIONS, TITLE_ASCII, TITLE_TEXT class TurdshovelContext(context.Context): \"\"\"Context", "cmd in [_Exit, _Help]: self._registry.register_command(cmd(), override) def reload_commands(self): \"\"\"Reloads all the commands for", "= None self.target_friendly_name = \"\" self.runtime = None self.available_obj_types = SortedSet() title_panel =", "self._registry._completer.words.remove(k) # Readd commands for exit and help with less aliases for cmd", "Turdshovel app. Only allows interactive mode\"\"\" # Need to set this to allow", "List, Tuple from nubia import context, eventbus from nubia.internal import cmdloader from nubia.internal.cmdbase", "nubia.internal import cmdloader from nubia.internal.cmdbase import AutoCommand from pygments.token import Name, Token from", "in cmdloader.load_commands(COMMAND_PACKAGES): self._registry.register_command( AutoCommand(cmd, NUBIA_OPTIONS), override=True ) def on_interactive(self, args): self.verbose = args.verbose", "related runtime self.target = None self.target_friendly_name = \"\" self.runtime = None self.available_obj_types =", "#52311A\", end=\"\").append( TITLE_TEXT, style=\"bold #693F21\" ), border_style=\"bold #52311A\", subtitle=f\"{':poop:' * 36}\", box=box.SIMPLE, )", "import Text from sortedcontainers import SortedSet from ._nubia import _Exit, _Help from .constants", "commands for the context\"\"\" self._replace_internal_cmds(override=True) for cmd in cmdloader.load_commands(COMMAND_PACKAGES): self._registry.register_command( AutoCommand(cmd, NUBIA_OPTIONS), override=True", "Panel from rich.text import Text from sortedcontainers import SortedSet from ._nubia import _Exit,", "AutoCommand(cmd, NUBIA_OPTIONS), override=True ) def on_interactive(self, args): self.verbose = args.verbose self.console = 
Console(soft_wrap=True)", "from rich import box, inspect from rich.align import Align from rich.console import Console", "get_prompt_tokens(self) -> List[Tuple[Any, str]]: tokens = [ (Token.NewLine, \"\\n\"), (Token.Title, \"Turdshovel\"), (Token.Space, \"\"),", "\"@\")) return tokens def _replace_internal_cmds(self, override: bool): for k, v in copy(self._registry._cmd_instance_map).items(): if", "in copy(self._registry._cmd_instance_map).items(): if v.__module__.startswith(\"nubia.internal.commands\"): self._registry._cmd_instance_map.pop(k) self._registry._completer.meta_dict.pop(k) self._registry._completer.words.remove(k) # Readd commands for exit and", "= Console(soft_wrap=True) self.console.set_alt_screen() # This will be whatever the DataTarget is connected to", "all the commands for the context\"\"\" self._replace_internal_cmds(override=True) for cmd in cmdloader.load_commands(COMMAND_PACKAGES): self._registry.register_command( AutoCommand(cmd,", "import Any, List, Tuple from nubia import context, eventbus from nubia.internal import cmdloader", "Need to set this to allow initialization available_obj_types = SortedSet() def get_prompt_tokens(self) ->", "# Readd commands for exit and help with less aliases for cmd in", "the related runtime self.target = None self.target_friendly_name = \"\" self.runtime = None self.available_obj_types", "k, v in copy(self._registry._cmd_instance_map).items(): if v.__module__.startswith(\"nubia.internal.commands\"): self._registry._cmd_instance_map.pop(k) self._registry._completer.meta_dict.pop(k) self._registry._completer.words.remove(k) # Readd commands for", "if v.__module__.startswith(\"nubia.internal.commands\"): self._registry._cmd_instance_map.pop(k) self._registry._completer.meta_dict.pop(k) self._registry._completer.words.remove(k) # Readd commands for exit and help with", "Only allows interactive mode\"\"\" # Need to set this to allow initialization available_obj_types", "(Token.Pound, \"> \"), ] if 
self.target_friendly_name: tokens.insert(3, (Name.Command, self.target_friendly_name)) tokens.insert(3, (Token.At, \"@\")) return", "for k, v in copy(self._registry._cmd_instance_map).items(): if v.__module__.startswith(\"nubia.internal.commands\"): self._registry._cmd_instance_map.pop(k) self._registry._completer.meta_dict.pop(k) self._registry._completer.words.remove(k) # Readd commands", "= \"\" self.runtime = None self.available_obj_types = SortedSet() title_panel = Panel.fit( Text(TITLE_ASCII.rjust(33), style=\"bold", "tokens = [ (Token.NewLine, \"\\n\"), (Token.Title, \"Turdshovel\"), (Token.Space, \"\"), (Token.Pound, \"> \"), ]", "the Turdshovel app. Only allows interactive mode\"\"\" # Need to set this to", "this to allow initialization available_obj_types = SortedSet() def get_prompt_tokens(self) -> List[Tuple[Any, str]]: tokens", "typing import Any, List, Tuple from nubia import context, eventbus from nubia.internal import", "NUBIA_OPTIONS), override=True ) def on_interactive(self, args): self.verbose = args.verbose self.console = Console(soft_wrap=True) self.console.set_alt_screen()", "rich import box, inspect from rich.align import Align from rich.console import Console from", "cmdloader.load_commands(COMMAND_PACKAGES): self._registry.register_command( AutoCommand(cmd, NUBIA_OPTIONS), override=True ) def on_interactive(self, args): self.verbose = args.verbose self.console", "self.console.set_alt_screen() # This will be whatever the DataTarget is connected to and the", "be whatever the DataTarget is connected to and the related runtime self.target =", "self._registry._cmd_instance_map.pop(k) self._registry._completer.meta_dict.pop(k) self._registry._completer.words.remove(k) # Readd commands for exit and help with less aliases", "self.target = None self.target_friendly_name = \"\" self.runtime = None self.available_obj_types = SortedSet() title_panel", "copy from typing import Any, List, Tuple from nubia import context, eventbus from", "cmd in 
cmdloader.load_commands(COMMAND_PACKAGES): self._registry.register_command( AutoCommand(cmd, NUBIA_OPTIONS), override=True ) def on_interactive(self, args): self.verbose =", "import cmdloader from nubia.internal.cmdbase import AutoCommand from pygments.token import Name, Token from rich", "from sortedcontainers import SortedSet from ._nubia import _Exit, _Help from .constants import COMMAND_PACKAGES,", "TurdshovelContext(context.Context): \"\"\"Context for the Turdshovel app. Only allows interactive mode\"\"\" # Need to", "initialization available_obj_types = SortedSet() def get_prompt_tokens(self) -> List[Tuple[Any, str]]: tokens = [ (Token.NewLine,", "= Panel.fit( Text(TITLE_ASCII.rjust(33), style=\"bold #52311A\", end=\"\").append( TITLE_TEXT, style=\"bold #693F21\" ), border_style=\"bold #52311A\", subtitle=f\"{':poop:'", "= None self.available_obj_types = SortedSet() title_panel = Panel.fit( Text(TITLE_ASCII.rjust(33), style=\"bold #52311A\", end=\"\").append( TITLE_TEXT,", "from rich.panel import Panel from rich.text import Text from sortedcontainers import SortedSet from", "nubia.internal.cmdbase import AutoCommand from pygments.token import Name, Token from rich import box, inspect", "import Align from rich.console import Console from rich.panel import Panel from rich.text import", "override=True ) def on_interactive(self, args): self.verbose = args.verbose self.console = Console(soft_wrap=True) self.console.set_alt_screen() #", "List[Tuple[Any, str]]: tokens = [ (Token.NewLine, \"\\n\"), (Token.Title, \"Turdshovel\"), (Token.Space, \"\"), (Token.Pound, \">", "self._registry.register_command( AutoCommand(cmd, NUBIA_OPTIONS), override=True ) def on_interactive(self, args): self.verbose = args.verbose self.console =", "AutoCommand from pygments.token import Name, Token from rich import box, inspect from rich.align", ") def on_interactive(self, args): self.verbose = args.verbose self.console = Console(soft_wrap=True) self.console.set_alt_screen() # This", 
"COMMAND_PACKAGES, NUBIA_OPTIONS, TITLE_ASCII, TITLE_TEXT class TurdshovelContext(context.Context): \"\"\"Context for the Turdshovel app. Only allows", "for the Turdshovel app. Only allows interactive mode\"\"\" # Need to set this", "def on_interactive(self, args): self.verbose = args.verbose self.console = Console(soft_wrap=True) self.console.set_alt_screen() # This will", "(Name.Command, self.target_friendly_name)) tokens.insert(3, (Token.At, \"@\")) return tokens def _replace_internal_cmds(self, override: bool): for k,", "._nubia import _Exit, _Help from .constants import COMMAND_PACKAGES, NUBIA_OPTIONS, TITLE_ASCII, TITLE_TEXT class TurdshovelContext(context.Context):", "from typing import Any, List, Tuple from nubia import context, eventbus from nubia.internal", "and the related runtime self.target = None self.target_friendly_name = \"\" self.runtime = None", "eventbus from nubia.internal import cmdloader from nubia.internal.cmdbase import AutoCommand from pygments.token import Name,", "runtime self.target = None self.target_friendly_name = \"\" self.runtime = None self.available_obj_types = SortedSet()", "-> List[Tuple[Any, str]]: tokens = [ (Token.NewLine, \"\\n\"), (Token.Title, \"Turdshovel\"), (Token.Space, \"\"), (Token.Pound,", "whatever the DataTarget is connected to and the related runtime self.target = None", "import context, eventbus from nubia.internal import cmdloader from nubia.internal.cmdbase import AutoCommand from pygments.token", "os from copy import copy from typing import Any, List, Tuple from nubia", "self.target_friendly_name: tokens.insert(3, (Name.Command, self.target_friendly_name)) tokens.insert(3, (Token.At, \"@\")) return tokens def _replace_internal_cmds(self, override: bool):", "rich.align import Align from rich.console import Console from rich.panel import Panel from rich.text", "Any, List, Tuple from nubia import context, eventbus from nubia.internal import cmdloader from", "TITLE_ASCII, TITLE_TEXT class 
TurdshovelContext(context.Context): \"\"\"Context for the Turdshovel app. Only allows interactive mode\"\"\"", "from rich.console import Console from rich.panel import Panel from rich.text import Text from", "to set this to allow initialization available_obj_types = SortedSet() def get_prompt_tokens(self) -> List[Tuple[Any,", "nubia import context, eventbus from nubia.internal import cmdloader from nubia.internal.cmdbase import AutoCommand from", "\"), ] if self.target_friendly_name: tokens.insert(3, (Name.Command, self.target_friendly_name)) tokens.insert(3, (Token.At, \"@\")) return tokens def", "reload_commands(self): \"\"\"Reloads all the commands for the context\"\"\" self._replace_internal_cmds(override=True) for cmd in cmdloader.load_commands(COMMAND_PACKAGES):", "[ (Token.NewLine, \"\\n\"), (Token.Title, \"Turdshovel\"), (Token.Space, \"\"), (Token.Pound, \"> \"), ] if self.target_friendly_name:", "the context\"\"\" self._replace_internal_cmds(override=True) for cmd in cmdloader.load_commands(COMMAND_PACKAGES): self._registry.register_command( AutoCommand(cmd, NUBIA_OPTIONS), override=True ) def", "the commands for the context\"\"\" self._replace_internal_cmds(override=True) for cmd in cmdloader.load_commands(COMMAND_PACKAGES): self._registry.register_command( AutoCommand(cmd, NUBIA_OPTIONS),", "Console(soft_wrap=True) self.console.set_alt_screen() # This will be whatever the DataTarget is connected to and", "app. Only allows interactive mode\"\"\" # Need to set this to allow initialization", "(Token.Space, \"\"), (Token.Pound, \"> \"), ] if self.target_friendly_name: tokens.insert(3, (Name.Command, self.target_friendly_name)) tokens.insert(3, (Token.At,", "self.verbose = args.verbose self.console = Console(soft_wrap=True) self.console.set_alt_screen() # This will be whatever the", "TITLE_TEXT class TurdshovelContext(context.Context): \"\"\"Context for the Turdshovel app. 
Only allows interactive mode\"\"\" #", "self._registry.register_command(cmd(), override) def reload_commands(self): \"\"\"Reloads all the commands for the context\"\"\" self._replace_internal_cmds(override=True) for", "the DataTarget is connected to and the related runtime self.target = None self.target_friendly_name", "\"Turdshovel\"), (Token.Space, \"\"), (Token.Pound, \"> \"), ] if self.target_friendly_name: tokens.insert(3, (Name.Command, self.target_friendly_name)) tokens.insert(3,", "\"\\n\"), (Token.Title, \"Turdshovel\"), (Token.Space, \"\"), (Token.Pound, \"> \"), ] if self.target_friendly_name: tokens.insert(3, (Name.Command,", "cmdloader from nubia.internal.cmdbase import AutoCommand from pygments.token import Name, Token from rich import", "= SortedSet() def get_prompt_tokens(self) -> List[Tuple[Any, str]]: tokens = [ (Token.NewLine, \"\\n\"), (Token.Title,", "if self.target_friendly_name: tokens.insert(3, (Name.Command, self.target_friendly_name)) tokens.insert(3, (Token.At, \"@\")) return tokens def _replace_internal_cmds(self, override:", "Readd commands for exit and help with less aliases for cmd in [_Exit,", "from .constants import COMMAND_PACKAGES, NUBIA_OPTIONS, TITLE_ASCII, TITLE_TEXT class TurdshovelContext(context.Context): \"\"\"Context for the Turdshovel", "self._registry._completer.meta_dict.pop(k) self._registry._completer.words.remove(k) # Readd commands for exit and help with less aliases for", "[_Exit, _Help]: self._registry.register_command(cmd(), override) def reload_commands(self): \"\"\"Reloads all the commands for the context\"\"\"", "set this to allow initialization available_obj_types = SortedSet() def get_prompt_tokens(self) -> List[Tuple[Any, str]]:", "connected to and the related runtime self.target = None self.target_friendly_name = \"\" self.runtime", "from nubia.internal.cmdbase import AutoCommand from pygments.token import Name, Token from rich import box,", "bool): for k, v in 
copy(self._registry._cmd_instance_map).items(): if v.__module__.startswith(\"nubia.internal.commands\"): self._registry._cmd_instance_map.pop(k) self._registry._completer.meta_dict.pop(k) self._registry._completer.words.remove(k) # Readd", "Text(TITLE_ASCII.rjust(33), style=\"bold #52311A\", end=\"\").append( TITLE_TEXT, style=\"bold #693F21\" ), border_style=\"bold #52311A\", subtitle=f\"{':poop:' * 36}\",", "v.__module__.startswith(\"nubia.internal.commands\"): self._registry._cmd_instance_map.pop(k) self._registry._completer.meta_dict.pop(k) self._registry._completer.words.remove(k) # Readd commands for exit and help with less", "Name, Token from rich import box, inspect from rich.align import Align from rich.console", "title_panel = Panel.fit( Text(TITLE_ASCII.rjust(33), style=\"bold #52311A\", end=\"\").append( TITLE_TEXT, style=\"bold #693F21\" ), border_style=\"bold #52311A\",", "Align from rich.console import Console from rich.panel import Panel from rich.text import Text", "with less aliases for cmd in [_Exit, _Help]: self._registry.register_command(cmd(), override) def reload_commands(self): \"\"\"Reloads", "from ._nubia import _Exit, _Help from .constants import COMMAND_PACKAGES, NUBIA_OPTIONS, TITLE_ASCII, TITLE_TEXT class", "(Token.NewLine, \"\\n\"), (Token.Title, \"Turdshovel\"), (Token.Space, \"\"), (Token.Pound, \"> \"), ] if self.target_friendly_name: tokens.insert(3,", "return tokens def _replace_internal_cmds(self, override: bool): for k, v in copy(self._registry._cmd_instance_map).items(): if v.__module__.startswith(\"nubia.internal.commands\"):", "# This will be whatever the DataTarget is connected to and the related", "import Name, Token from rich import box, inspect from rich.align import Align from", "inspect from rich.align import Align from rich.console import Console from rich.panel import Panel", "\"\" self.runtime = None self.available_obj_types = SortedSet() title_panel = Panel.fit( Text(TITLE_ASCII.rjust(33), style=\"bold #52311A\",", 
"tokens.insert(3, (Token.At, \"@\")) return tokens def _replace_internal_cmds(self, override: bool): for k, v in", "self.target_friendly_name)) tokens.insert(3, (Token.At, \"@\")) return tokens def _replace_internal_cmds(self, override: bool): for k, v", "self._replace_internal_cmds(override=True) for cmd in cmdloader.load_commands(COMMAND_PACKAGES): self._registry.register_command( AutoCommand(cmd, NUBIA_OPTIONS), override=True ) def on_interactive(self, args):", "Panel.fit( Text(TITLE_ASCII.rjust(33), style=\"bold #52311A\", end=\"\").append( TITLE_TEXT, style=\"bold #693F21\" ), border_style=\"bold #52311A\", subtitle=f\"{':poop:' *", "\"\"\"Context for the Turdshovel app. Only allows interactive mode\"\"\" # Need to set", "tokens.insert(3, (Name.Command, self.target_friendly_name)) tokens.insert(3, (Token.At, \"@\")) return tokens def _replace_internal_cmds(self, override: bool): for", "to allow initialization available_obj_types = SortedSet() def get_prompt_tokens(self) -> List[Tuple[Any, str]]: tokens =", "for cmd in cmdloader.load_commands(COMMAND_PACKAGES): self._registry.register_command( AutoCommand(cmd, NUBIA_OPTIONS), override=True ) def on_interactive(self, args): self.verbose", "allow initialization available_obj_types = SortedSet() def get_prompt_tokens(self) -> List[Tuple[Any, str]]: tokens = [", "context\"\"\" self._replace_internal_cmds(override=True) for cmd in cmdloader.load_commands(COMMAND_PACKAGES): self._registry.register_command( AutoCommand(cmd, NUBIA_OPTIONS), override=True ) def on_interactive(self,", "style=\"bold #693F21\" ), border_style=\"bold #52311A\", subtitle=f\"{':poop:' * 36}\", box=box.SIMPLE, ) self.console.print(Align.center(title_panel)) self._replace_internal_cmds(override=False) self.registry.dispatch_message(eventbus.Message.CONNECTED)", "rich.panel import Panel from rich.text import Text from sortedcontainers import SortedSet from ._nubia", "\"\"\"Reloads all the commands for the context\"\"\" 
self._replace_internal_cmds(override=True) for cmd in cmdloader.load_commands(COMMAND_PACKAGES): self._registry.register_command(", "_Exit, _Help from .constants import COMMAND_PACKAGES, NUBIA_OPTIONS, TITLE_ASCII, TITLE_TEXT class TurdshovelContext(context.Context): \"\"\"Context for", "import Panel from rich.text import Text from sortedcontainers import SortedSet from ._nubia import", "help with less aliases for cmd in [_Exit, _Help]: self._registry.register_command(cmd(), override) def reload_commands(self):", "] if self.target_friendly_name: tokens.insert(3, (Name.Command, self.target_friendly_name)) tokens.insert(3, (Token.At, \"@\")) return tokens def _replace_internal_cmds(self,", "SortedSet() title_panel = Panel.fit( Text(TITLE_ASCII.rjust(33), style=\"bold #52311A\", end=\"\").append( TITLE_TEXT, style=\"bold #693F21\" ), border_style=\"bold", "= SortedSet() title_panel = Panel.fit( Text(TITLE_ASCII.rjust(33), style=\"bold #52311A\", end=\"\").append( TITLE_TEXT, style=\"bold #693F21\" ),", "sortedcontainers import SortedSet from ._nubia import _Exit, _Help from .constants import COMMAND_PACKAGES, NUBIA_OPTIONS,", "aliases for cmd in [_Exit, _Help]: self._registry.register_command(cmd(), override) def reload_commands(self): \"\"\"Reloads all the", "interactive mode\"\"\" # Need to set this to allow initialization available_obj_types = SortedSet()", "copy import copy from typing import Any, List, Tuple from nubia import context,", "in [_Exit, _Help]: self._registry.register_command(cmd(), override) def reload_commands(self): \"\"\"Reloads all the commands for the", "args.verbose self.console = Console(soft_wrap=True) self.console.set_alt_screen() # This will be whatever the DataTarget is", "str]]: tokens = [ (Token.NewLine, \"\\n\"), (Token.Title, \"Turdshovel\"), (Token.Space, \"\"), (Token.Pound, \"> \"),", "= args.verbose self.console = Console(soft_wrap=True) self.console.set_alt_screen() # This will be whatever the DataTarget", 
"self.target_friendly_name = \"\" self.runtime = None self.available_obj_types = SortedSet() title_panel = Panel.fit( Text(TITLE_ASCII.rjust(33),", "self.console = Console(soft_wrap=True) self.console.set_alt_screen() # This will be whatever the DataTarget is connected", "(Token.At, \"@\")) return tokens def _replace_internal_cmds(self, override: bool): for k, v in copy(self._registry._cmd_instance_map).items():", "commands for exit and help with less aliases for cmd in [_Exit, _Help]:", "less aliases for cmd in [_Exit, _Help]: self._registry.register_command(cmd(), override) def reload_commands(self): \"\"\"Reloads all", "= [ (Token.NewLine, \"\\n\"), (Token.Title, \"Turdshovel\"), (Token.Space, \"\"), (Token.Pound, \"> \"), ] if", "from pygments.token import Name, Token from rich import box, inspect from rich.align import", "import AutoCommand from pygments.token import Name, Token from rich import box, inspect from", "import Console from rich.panel import Panel from rich.text import Text from sortedcontainers import", "will be whatever the DataTarget is connected to and the related runtime self.target", "import COMMAND_PACKAGES, NUBIA_OPTIONS, TITLE_ASCII, TITLE_TEXT class TurdshovelContext(context.Context): \"\"\"Context for the Turdshovel app. Only", "and help with less aliases for cmd in [_Exit, _Help]: self._registry.register_command(cmd(), override) def", "for cmd in [_Exit, _Help]: self._registry.register_command(cmd(), override) def reload_commands(self): \"\"\"Reloads all the commands", "v in copy(self._registry._cmd_instance_map).items(): if v.__module__.startswith(\"nubia.internal.commands\"): self._registry._cmd_instance_map.pop(k) self._registry._completer.meta_dict.pop(k) self._registry._completer.words.remove(k) # Readd commands for exit", "mode\"\"\" # Need to set this to allow initialization available_obj_types = SortedSet() def" ]
[ "= [e/float(sum_i) for e in Y[i]] return Y def get_classes(self): return self.classes def", "in not_processed: target = [0 for _ in self.classes] for pktype in e:", "for e in Y[i]] return Y def get_classes(self): return self.classes def get_headers(self): return", "Ys_not_processed.append(actual_ys) Y = self.make_targets(Ys_not_processed, sum1) return X, Y def make_targets(self, not_processed, sum1=False): Y", "im = ImageReader(imagename) X.append(im.get_frame(0)) self.classes.add(row[\"Type1\"]) actual_ys = [] actual_ys.append(row[\"Type1\"]) if row[\"Type2\"] is not", "not_processed, sum1=False): Y = [] lcl = list(self.classes) for e in not_processed: target", "= 1 Y.append(target) if sum1: for i in range(len(Y)): sum_i = sum(Y[i]) Y[i]", "actual_ys.append(row[\"Type2\"]) Ys_not_processed.append(actual_ys) Y = self.make_targets(Ys_not_processed, sum1) return X, Y def make_targets(self, not_processed, sum1=False):", "if sum1: for i in range(len(Y)): sum_i = sum(Y[i]) Y[i] = [e/float(sum_i) for", "open(join(self.path, \"pokemon.csv\"), \"r\") as csv_obj: csv_reader = csv.DictReader(csv_obj) for row in csv_reader: imagename", "ILoadSupervised from os.path import join, exists import csv class LoadPokemon(ILoadSupervised): def __init__(self, path=\"train_data/Folder_Videojuegos/pokemon-images-and-types\"):", "lcl = list(self.classes) for e in not_processed: target = [0 for _ in", "csv_reader: imagename = join(self.path, \"images\", row[\"Name\"]+\".png\") if exists(imagename): im = ImageReader(imagename) X.append(im.get_frame(0)) self.classes.add(row[\"Type1\"])", "X.append(im.get_frame(0)) self.classes.add(row[\"Type1\"]) actual_ys = [] actual_ys.append(row[\"Type1\"]) if row[\"Type2\"] is not None: self.classes.add(row[\"Type2\"]) actual_ys.append(row[\"Type2\"])", "for e in not_processed: target = [0 for _ in self.classes] for pktype", "csv_obj: csv_reader = csv.DictReader(csv_obj) for row in csv_reader: imagename = join(self.path, \"images\", 
row[\"Name\"]+\".png\")", "csv_reader = csv.DictReader(csv_obj) for row in csv_reader: imagename = join(self.path, \"images\", row[\"Name\"]+\".png\") if", "Y def make_targets(self, not_processed, sum1=False): Y = [] lcl = list(self.classes) for e", "[0 for _ in self.classes] for pktype in e: target[lcl.index(pktype)] = 1 Y.append(target)", "target = [0 for _ in self.classes] for pktype in e: target[lcl.index(pktype)] =", "for i in range(len(Y)): sum_i = sum(Y[i]) Y[i] = [e/float(sum_i) for e in", "\"r\") as csv_obj: csv_reader = csv.DictReader(csv_obj) for row in csv_reader: imagename = join(self.path,", "sum(Y[i]) Y[i] = [e/float(sum_i) for e in Y[i]] return Y def get_classes(self): return", "class LoadPokemon(ILoadSupervised): def __init__(self, path=\"train_data/Folder_Videojuegos/pokemon-images-and-types\"): self.path = path self.classes = set() def get_all(self,", "= list(self.classes) for e in not_processed: target = [0 for _ in self.classes]", "self.classes] for pktype in e: target[lcl.index(pktype)] = 1 Y.append(target) if sum1: for i", "not None: self.classes.add(row[\"Type2\"]) actual_ys.append(row[\"Type2\"]) Ys_not_processed.append(actual_ys) Y = self.make_targets(Ys_not_processed, sum1) return X, Y def", "X = [] Y = [] Ys_not_processed = [] with open(join(self.path, \"pokemon.csv\"), \"r\")", "self.path = path self.classes = set() def get_all(self, sum1=False): X = [] Y", "csv.DictReader(csv_obj) for row in csv_reader: imagename = join(self.path, \"images\", row[\"Name\"]+\".png\") if exists(imagename): im", "Ys_not_processed = [] with open(join(self.path, \"pokemon.csv\"), \"r\") as csv_obj: csv_reader = csv.DictReader(csv_obj) for", "import csv class LoadPokemon(ILoadSupervised): def __init__(self, path=\"train_data/Folder_Videojuegos/pokemon-images-and-types\"): self.path = path self.classes = set()", "row[\"Name\"]+\".png\") if exists(imagename): im = ImageReader(imagename) X.append(im.get_frame(0)) self.classes.add(row[\"Type1\"]) actual_ys = [] 
actual_ys.append(row[\"Type1\"]) if", "Y = [] Ys_not_processed = [] with open(join(self.path, \"pokemon.csv\"), \"r\") as csv_obj: csv_reader", "self.make_targets(Ys_not_processed, sum1) return X, Y def make_targets(self, not_processed, sum1=False): Y = [] lcl", "-*- coding: utf-8 -*- from pims import ImageReader from load_data.ILoadSupervised import ILoadSupervised from", "= path self.classes = set() def get_all(self, sum1=False): X = [] Y =", "set() def get_all(self, sum1=False): X = [] Y = [] Ys_not_processed = []", "= [] lcl = list(self.classes) for e in not_processed: target = [0 for", "row in csv_reader: imagename = join(self.path, \"images\", row[\"Name\"]+\".png\") if exists(imagename): im = ImageReader(imagename)", "from pims import ImageReader from load_data.ILoadSupervised import ILoadSupervised from os.path import join, exists", "self.classes = set() def get_all(self, sum1=False): X = [] Y = [] Ys_not_processed", "list(self.classes) for e in not_processed: target = [0 for _ in self.classes] for", "= [] Ys_not_processed = [] with open(join(self.path, \"pokemon.csv\"), \"r\") as csv_obj: csv_reader =", "self.classes.add(row[\"Type1\"]) actual_ys = [] actual_ys.append(row[\"Type1\"]) if row[\"Type2\"] is not None: self.classes.add(row[\"Type2\"]) actual_ys.append(row[\"Type2\"]) Ys_not_processed.append(actual_ys)", "self.classes.add(row[\"Type2\"]) actual_ys.append(row[\"Type2\"]) Ys_not_processed.append(actual_ys) Y = self.make_targets(Ys_not_processed, sum1) return X, Y def make_targets(self, not_processed,", "sum1=False): Y = [] lcl = list(self.classes) for e in not_processed: target =", "Y = [] lcl = list(self.classes) for e in not_processed: target = [0", "if row[\"Type2\"] is not None: self.classes.add(row[\"Type2\"]) actual_ys.append(row[\"Type2\"]) Ys_not_processed.append(actual_ys) Y = self.make_targets(Ys_not_processed, sum1) return", "is not None: self.classes.add(row[\"Type2\"]) actual_ys.append(row[\"Type2\"]) 
Ys_not_processed.append(actual_ys) Y = self.make_targets(Ys_not_processed, sum1) return X, Y", "from load_data.ILoadSupervised import ILoadSupervised from os.path import join, exists import csv class LoadPokemon(ILoadSupervised):", "row[\"Type2\"] is not None: self.classes.add(row[\"Type2\"]) actual_ys.append(row[\"Type2\"]) Ys_not_processed.append(actual_ys) Y = self.make_targets(Ys_not_processed, sum1) return X,", "ImageReader(imagename) X.append(im.get_frame(0)) self.classes.add(row[\"Type1\"]) actual_ys = [] actual_ys.append(row[\"Type1\"]) if row[\"Type2\"] is not None: self.classes.add(row[\"Type2\"])", "_ in self.classes] for pktype in e: target[lcl.index(pktype)] = 1 Y.append(target) if sum1:", "target[lcl.index(pktype)] = 1 Y.append(target) if sum1: for i in range(len(Y)): sum_i = sum(Y[i])", "Y.append(target) if sum1: for i in range(len(Y)): sum_i = sum(Y[i]) Y[i] = [e/float(sum_i)", "[] with open(join(self.path, \"pokemon.csv\"), \"r\") as csv_obj: csv_reader = csv.DictReader(csv_obj) for row in", "for row in csv_reader: imagename = join(self.path, \"images\", row[\"Name\"]+\".png\") if exists(imagename): im =", "[] Ys_not_processed = [] with open(join(self.path, \"pokemon.csv\"), \"r\") as csv_obj: csv_reader = csv.DictReader(csv_obj)", "not_processed: target = [0 for _ in self.classes] for pktype in e: target[lcl.index(pktype)]", "with open(join(self.path, \"pokemon.csv\"), \"r\") as csv_obj: csv_reader = csv.DictReader(csv_obj) for row in csv_reader:", "= join(self.path, \"images\", row[\"Name\"]+\".png\") if exists(imagename): im = ImageReader(imagename) X.append(im.get_frame(0)) self.classes.add(row[\"Type1\"]) actual_ys =", "make_targets(self, not_processed, sum1=False): Y = [] lcl = list(self.classes) for e in not_processed:", "return X, Y def make_targets(self, not_processed, sum1=False): Y = [] lcl = list(self.classes)", "join(self.path, \"images\", row[\"Name\"]+\".png\") if exists(imagename): im = ImageReader(imagename) 
X.append(im.get_frame(0)) self.classes.add(row[\"Type1\"]) actual_ys = []", "e in Y[i]] return Y def get_classes(self): return self.classes def get_headers(self): return [\"image\"]#", "in e: target[lcl.index(pktype)] = 1 Y.append(target) if sum1: for i in range(len(Y)): sum_i", "actual_ys = [] actual_ys.append(row[\"Type1\"]) if row[\"Type2\"] is not None: self.classes.add(row[\"Type2\"]) actual_ys.append(row[\"Type2\"]) Ys_not_processed.append(actual_ys) Y", "coding: utf-8 -*- from pims import ImageReader from load_data.ILoadSupervised import ILoadSupervised from os.path", "= [0 for _ in self.classes] for pktype in e: target[lcl.index(pktype)] = 1", "\"images\", row[\"Name\"]+\".png\") if exists(imagename): im = ImageReader(imagename) X.append(im.get_frame(0)) self.classes.add(row[\"Type1\"]) actual_ys = [] actual_ys.append(row[\"Type1\"])", "for pktype in e: target[lcl.index(pktype)] = 1 Y.append(target) if sum1: for i in", "import join, exists import csv class LoadPokemon(ILoadSupervised): def __init__(self, path=\"train_data/Folder_Videojuegos/pokemon-images-and-types\"): self.path = path", "def __init__(self, path=\"train_data/Folder_Videojuegos/pokemon-images-and-types\"): self.path = path self.classes = set() def get_all(self, sum1=False): X", "= sum(Y[i]) Y[i] = [e/float(sum_i) for e in Y[i]] return Y def get_classes(self):", "LoadPokemon(ILoadSupervised): def __init__(self, path=\"train_data/Folder_Videojuegos/pokemon-images-and-types\"): self.path = path self.classes = set() def get_all(self, sum1=False):", "# -*- coding: utf-8 -*- from pims import ImageReader from load_data.ILoadSupervised import ILoadSupervised", "if exists(imagename): im = ImageReader(imagename) X.append(im.get_frame(0)) self.classes.add(row[\"Type1\"]) actual_ys = [] actual_ys.append(row[\"Type1\"]) if row[\"Type2\"]", "imagename = join(self.path, \"images\", row[\"Name\"]+\".png\") if exists(imagename): im = ImageReader(imagename) X.append(im.get_frame(0)) 
self.classes.add(row[\"Type1\"]) actual_ys", "join, exists import csv class LoadPokemon(ILoadSupervised): def __init__(self, path=\"train_data/Folder_Videojuegos/pokemon-images-and-types\"): self.path = path self.classes", "[] Y = [] Ys_not_processed = [] with open(join(self.path, \"pokemon.csv\"), \"r\") as csv_obj:", "in self.classes] for pktype in e: target[lcl.index(pktype)] = 1 Y.append(target) if sum1: for", "range(len(Y)): sum_i = sum(Y[i]) Y[i] = [e/float(sum_i) for e in Y[i]] return Y", "[e/float(sum_i) for e in Y[i]] return Y def get_classes(self): return self.classes def get_headers(self):", "get_all(self, sum1=False): X = [] Y = [] Ys_not_processed = [] with open(join(self.path,", "= [] actual_ys.append(row[\"Type1\"]) if row[\"Type2\"] is not None: self.classes.add(row[\"Type2\"]) actual_ys.append(row[\"Type2\"]) Ys_not_processed.append(actual_ys) Y =", "in range(len(Y)): sum_i = sum(Y[i]) Y[i] = [e/float(sum_i) for e in Y[i]] return", "i in range(len(Y)): sum_i = sum(Y[i]) Y[i] = [e/float(sum_i) for e in Y[i]]", "sum1) return X, Y def make_targets(self, not_processed, sum1=False): Y = [] lcl =", "__init__(self, path=\"train_data/Folder_Videojuegos/pokemon-images-and-types\"): self.path = path self.classes = set() def get_all(self, sum1=False): X =", "import ImageReader from load_data.ILoadSupervised import ILoadSupervised from os.path import join, exists import csv", "= [] Y = [] Ys_not_processed = [] with open(join(self.path, \"pokemon.csv\"), \"r\") as", "Y[i]] return Y def get_classes(self): return self.classes def get_headers(self): return [\"image\"]# None #self.headers", "actual_ys.append(row[\"Type1\"]) if row[\"Type2\"] is not None: self.classes.add(row[\"Type2\"]) actual_ys.append(row[\"Type2\"]) Ys_not_processed.append(actual_ys) Y = self.make_targets(Ys_not_processed, sum1)", "path=\"train_data/Folder_Videojuegos/pokemon-images-and-types\"): self.path = path self.classes = set() def get_all(self, sum1=False): X = []", "= 
csv.DictReader(csv_obj) for row in csv_reader: imagename = join(self.path, \"images\", row[\"Name\"]+\".png\") if exists(imagename):", "= [] with open(join(self.path, \"pokemon.csv\"), \"r\") as csv_obj: csv_reader = csv.DictReader(csv_obj) for row", "[] actual_ys.append(row[\"Type1\"]) if row[\"Type2\"] is not None: self.classes.add(row[\"Type2\"]) actual_ys.append(row[\"Type2\"]) Ys_not_processed.append(actual_ys) Y = self.make_targets(Ys_not_processed,", "Y[i] = [e/float(sum_i) for e in Y[i]] return Y def get_classes(self): return self.classes", "def make_targets(self, not_processed, sum1=False): Y = [] lcl = list(self.classes) for e in", "X, Y def make_targets(self, not_processed, sum1=False): Y = [] lcl = list(self.classes) for", "= set() def get_all(self, sum1=False): X = [] Y = [] Ys_not_processed =", "ImageReader from load_data.ILoadSupervised import ILoadSupervised from os.path import join, exists import csv class", "sum1=False): X = [] Y = [] Ys_not_processed = [] with open(join(self.path, \"pokemon.csv\"),", "Y = self.make_targets(Ys_not_processed, sum1) return X, Y def make_targets(self, not_processed, sum1=False): Y =", "-*- from pims import ImageReader from load_data.ILoadSupervised import ILoadSupervised from os.path import join,", "\"pokemon.csv\"), \"r\") as csv_obj: csv_reader = csv.DictReader(csv_obj) for row in csv_reader: imagename =", "e: target[lcl.index(pktype)] = 1 Y.append(target) if sum1: for i in range(len(Y)): sum_i =", "exists(imagename): im = ImageReader(imagename) X.append(im.get_frame(0)) self.classes.add(row[\"Type1\"]) actual_ys = [] actual_ys.append(row[\"Type1\"]) if row[\"Type2\"] is", "for _ in self.classes] for pktype in e: target[lcl.index(pktype)] = 1 Y.append(target) if", "[] lcl = list(self.classes) for e in not_processed: target = [0 for _", "import ILoadSupervised from os.path import join, exists import csv class LoadPokemon(ILoadSupervised): def __init__(self,", "os.path import join, exists import csv class 
LoadPokemon(ILoadSupervised): def __init__(self, path=\"train_data/Folder_Videojuegos/pokemon-images-and-types\"): self.path =", "1 Y.append(target) if sum1: for i in range(len(Y)): sum_i = sum(Y[i]) Y[i] =", "pktype in e: target[lcl.index(pktype)] = 1 Y.append(target) if sum1: for i in range(len(Y)):", "def get_all(self, sum1=False): X = [] Y = [] Ys_not_processed = [] with", "in Y[i]] return Y def get_classes(self): return self.classes def get_headers(self): return [\"image\"]# None", "sum1: for i in range(len(Y)): sum_i = sum(Y[i]) Y[i] = [e/float(sum_i) for e", "e in not_processed: target = [0 for _ in self.classes] for pktype in", "load_data.ILoadSupervised import ILoadSupervised from os.path import join, exists import csv class LoadPokemon(ILoadSupervised): def", "utf-8 -*- from pims import ImageReader from load_data.ILoadSupervised import ILoadSupervised from os.path import", "sum_i = sum(Y[i]) Y[i] = [e/float(sum_i) for e in Y[i]] return Y def", "in csv_reader: imagename = join(self.path, \"images\", row[\"Name\"]+\".png\") if exists(imagename): im = ImageReader(imagename) X.append(im.get_frame(0))", "None: self.classes.add(row[\"Type2\"]) actual_ys.append(row[\"Type2\"]) Ys_not_processed.append(actual_ys) Y = self.make_targets(Ys_not_processed, sum1) return X, Y def make_targets(self,", "from os.path import join, exists import csv class LoadPokemon(ILoadSupervised): def __init__(self, path=\"train_data/Folder_Videojuegos/pokemon-images-and-types\"): self.path", "pims import ImageReader from load_data.ILoadSupervised import ILoadSupervised from os.path import join, exists import", "csv class LoadPokemon(ILoadSupervised): def __init__(self, path=\"train_data/Folder_Videojuegos/pokemon-images-and-types\"): self.path = path self.classes = set() def", "= ImageReader(imagename) X.append(im.get_frame(0)) self.classes.add(row[\"Type1\"]) actual_ys = [] actual_ys.append(row[\"Type1\"]) if row[\"Type2\"] is not None:", "= self.make_targets(Ys_not_processed, sum1) 
return X, Y def make_targets(self, not_processed, sum1=False): Y = []", "exists import csv class LoadPokemon(ILoadSupervised): def __init__(self, path=\"train_data/Folder_Videojuegos/pokemon-images-and-types\"): self.path = path self.classes =", "as csv_obj: csv_reader = csv.DictReader(csv_obj) for row in csv_reader: imagename = join(self.path, \"images\",", "path self.classes = set() def get_all(self, sum1=False): X = [] Y = []" ]
[ "This is longer than expected. Please make sure that every cell is compiling", "timeout_exception=TimeoutError, exception_message=time_error) def test_notebook_import(self): if (importFlag is False): raise ImportError(self.import_error) else: pass def", "import structural_helpers try: from assignment import POTUSgen importFlag = True except: importFlag =", "import * from structural import structural_helpers try: from assignment import POTUSgen importFlag =", "seems to be an error in the provided notebook. Please make sure that", "test_notebook_import(self): if (importFlag is False): raise ImportError(self.import_error) else: pass def test_check_function_names(self): self.assertIs(structural_helpers.check_for_function('generate_ngram_successors', POTUSgen),", "took more than {TIMEOUT_CONSTANT} seconds. This is longer than expected. Please make sure", "POTUSgen), True, self.method_error % ('generate_ngram_successors')) self.assertIs(structural_helpers.check_for_function('calculate_ngram_freqs', POTUSgen), True, self.method_error % ('calculate_ngram_freqs')) self.assertIs(structural_helpers.check_for_function('next_word_max', POTUSgen),", "= f\"Importing the notebook took more than {TIMEOUT_CONSTANT} seconds. This is longer than", "every cell is compiling and prevent complex structures.\" import_error = \"There seems to", "in the provided notebook. Please make sure that every cell is compiling without", "don\\'t rename the methods.\" @timeout_decorator.timeout(TIMEOUT_CONSTANT, timeout_exception=TimeoutError, exception_message=time_error) def test_notebook_import(self): if (importFlag is False):", "exception_message=time_error) def test_notebook_import(self): if (importFlag is False): raise ImportError(self.import_error) else: pass def test_check_function_names(self):", "that every cell is compiling and prevent complex structures.\" import_error = \"There seems", "found. 
Please don\\'t rename the methods.\" @timeout_decorator.timeout(TIMEOUT_CONSTANT, timeout_exception=TimeoutError, exception_message=time_error) def test_notebook_import(self): if (importFlag", "Please make sure that every cell is compiling without an error.\" method_error =", "* from behavior.ngram_solutions import * from structural import structural_helpers try: from assignment import", "every cell is compiling without an error.\" method_error = \"Function %s could not", "could not be found. Please don\\'t rename the methods.\" @timeout_decorator.timeout(TIMEOUT_CONSTANT, timeout_exception=TimeoutError, exception_message=time_error) def", "else: pass def test_check_function_names(self): self.assertIs(structural_helpers.check_for_function('generate_ngram_successors', POTUSgen), True, self.method_error % ('generate_ngram_successors')) self.assertIs(structural_helpers.check_for_function('calculate_ngram_freqs', POTUSgen), True,", "False): raise ImportError(self.import_error) else: pass def test_check_function_names(self): self.assertIs(structural_helpers.check_for_function('generate_ngram_successors', POTUSgen), True, self.method_error % ('generate_ngram_successors'))", "timeout_decorator import * from behavior.ngram_solutions import * from structural import structural_helpers try: from", "more than {TIMEOUT_CONSTANT} seconds. This is longer than expected. 
Please make sure that", "cell is compiling and prevent complex structures.\" import_error = \"There seems to be", "ImportError(self.import_error) else: pass def test_check_function_names(self): self.assertIs(structural_helpers.check_for_function('generate_ngram_successors', POTUSgen), True, self.method_error % ('generate_ngram_successors')) self.assertIs(structural_helpers.check_for_function('calculate_ngram_freqs', POTUSgen),", "POTUSgen importFlag = True except: importFlag = False class TestStructural(unittest.TestCase): TIMEOUT_CONSTANT = 180", "@timeout_decorator.timeout(TIMEOUT_CONSTANT, timeout_exception=TimeoutError, exception_message=time_error) def test_notebook_import(self): if (importFlag is False): raise ImportError(self.import_error) else: pass", "the notebook took more than {TIMEOUT_CONSTANT} seconds. This is longer than expected. Please", "try: from assignment import POTUSgen importFlag = True except: importFlag = False class", "an error.\" method_error = \"Function %s could not be found. Please don\\'t rename", "from structural import structural_helpers try: from assignment import POTUSgen importFlag = True except:", "import_error = \"There seems to be an error in the provided notebook. 
Please", "prevent complex structures.\" import_error = \"There seems to be an error in the", "('generate_ngram_successors')) self.assertIs(structural_helpers.check_for_function('calculate_ngram_freqs', POTUSgen), True, self.method_error % ('calculate_ngram_freqs')) self.assertIs(structural_helpers.check_for_function('next_word_max', POTUSgen), True, self.method_error % ('next_word_max'))", "Please make sure that every cell is compiling and prevent complex structures.\" import_error", "structural import structural_helpers try: from assignment import POTUSgen importFlag = True except: importFlag", "def test_check_function_names(self): self.assertIs(structural_helpers.check_for_function('generate_ngram_successors', POTUSgen), True, self.method_error % ('generate_ngram_successors')) self.assertIs(structural_helpers.check_for_function('calculate_ngram_freqs', POTUSgen), True, self.method_error %", "self.method_error % ('generate_ngram_successors')) self.assertIs(structural_helpers.check_for_function('calculate_ngram_freqs', POTUSgen), True, self.method_error % ('calculate_ngram_freqs')) self.assertIs(structural_helpers.check_for_function('next_word_max', POTUSgen), True, self.method_error", "* from structural import structural_helpers try: from assignment import POTUSgen importFlag = True", "importFlag = False class TestStructural(unittest.TestCase): TIMEOUT_CONSTANT = 180 time_error = f\"Importing the notebook", "True, self.method_error % ('generate_ngram_successors')) self.assertIs(structural_helpers.check_for_function('calculate_ngram_freqs', POTUSgen), True, self.method_error % ('calculate_ngram_freqs')) self.assertIs(structural_helpers.check_for_function('next_word_max', POTUSgen), True,", "structural_helpers try: from assignment import POTUSgen importFlag = True except: importFlag = False", "without an error.\" method_error = \"Function %s could not be found. Please don\\'t", "not be found. 
Please don\\'t rename the methods.\" @timeout_decorator.timeout(TIMEOUT_CONSTANT, timeout_exception=TimeoutError, exception_message=time_error) def test_notebook_import(self):", "is compiling without an error.\" method_error = \"Function %s could not be found.", "except: importFlag = False class TestStructural(unittest.TestCase): TIMEOUT_CONSTANT = 180 time_error = f\"Importing the", "is compiling and prevent complex structures.\" import_error = \"There seems to be an", "f\"Importing the notebook took more than {TIMEOUT_CONSTANT} seconds. This is longer than expected.", "self.assertIs(structural_helpers.check_for_function('generate_ngram_successors', POTUSgen), True, self.method_error % ('generate_ngram_successors')) self.assertIs(structural_helpers.check_for_function('calculate_ngram_freqs', POTUSgen), True, self.method_error % ('calculate_ngram_freqs')) self.assertIs(structural_helpers.check_for_function('next_word_max',", "False class TestStructural(unittest.TestCase): TIMEOUT_CONSTANT = 180 time_error = f\"Importing the notebook took more", "from timeout_decorator import * from behavior.ngram_solutions import * from structural import structural_helpers try:", "importFlag = True except: importFlag = False class TestStructural(unittest.TestCase): TIMEOUT_CONSTANT = 180 time_error", "= True except: importFlag = False class TestStructural(unittest.TestCase): TIMEOUT_CONSTANT = 180 time_error =", "the methods.\" @timeout_decorator.timeout(TIMEOUT_CONSTANT, timeout_exception=TimeoutError, exception_message=time_error) def test_notebook_import(self): if (importFlag is False): raise ImportError(self.import_error)", "than expected. 
Please make sure that every cell is compiling and prevent complex", "rename the methods.\" @timeout_decorator.timeout(TIMEOUT_CONSTANT, timeout_exception=TimeoutError, exception_message=time_error) def test_notebook_import(self): if (importFlag is False): raise", "raise ImportError(self.import_error) else: pass def test_check_function_names(self): self.assertIs(structural_helpers.check_for_function('generate_ngram_successors', POTUSgen), True, self.method_error % ('generate_ngram_successors')) self.assertIs(structural_helpers.check_for_function('calculate_ngram_freqs',", "= False class TestStructural(unittest.TestCase): TIMEOUT_CONSTANT = 180 time_error = f\"Importing the notebook took", "<filename>tests/structural/structural_test.py import unittest from timeout_decorator import * from behavior.ngram_solutions import * from structural", "import unittest from timeout_decorator import * from behavior.ngram_solutions import * from structural import", "is False): raise ImportError(self.import_error) else: pass def test_check_function_names(self): self.assertIs(structural_helpers.check_for_function('generate_ngram_successors', POTUSgen), True, self.method_error %", "test_check_function_names(self): self.assertIs(structural_helpers.check_for_function('generate_ngram_successors', POTUSgen), True, self.method_error % ('generate_ngram_successors')) self.assertIs(structural_helpers.check_for_function('calculate_ngram_freqs', POTUSgen), True, self.method_error % ('calculate_ngram_freqs'))", "is longer than expected. 
Please make sure that every cell is compiling and", "and prevent complex structures.\" import_error = \"There seems to be an error in", "Please don\\'t rename the methods.\" @timeout_decorator.timeout(TIMEOUT_CONSTANT, timeout_exception=TimeoutError, exception_message=time_error) def test_notebook_import(self): if (importFlag is", "from behavior.ngram_solutions import * from structural import structural_helpers try: from assignment import POTUSgen", "TestStructural(unittest.TestCase): TIMEOUT_CONSTANT = 180 time_error = f\"Importing the notebook took more than {TIMEOUT_CONSTANT}", "the provided notebook. Please make sure that every cell is compiling without an", "to be an error in the provided notebook. Please make sure that every", "def test_notebook_import(self): if (importFlag is False): raise ImportError(self.import_error) else: pass def test_check_function_names(self): self.assertIs(structural_helpers.check_for_function('generate_ngram_successors',", "= 180 time_error = f\"Importing the notebook took more than {TIMEOUT_CONSTANT} seconds. This", "make sure that every cell is compiling and prevent complex structures.\" import_error =", "True except: importFlag = False class TestStructural(unittest.TestCase): TIMEOUT_CONSTANT = 180 time_error = f\"Importing", "be an error in the provided notebook. Please make sure that every cell", "expected. Please make sure that every cell is compiling and prevent complex structures.\"", "structures.\" import_error = \"There seems to be an error in the provided notebook.", "from assignment import POTUSgen importFlag = True except: importFlag = False class TestStructural(unittest.TestCase):", "time_error = f\"Importing the notebook took more than {TIMEOUT_CONSTANT} seconds. This is longer", "180 time_error = f\"Importing the notebook took more than {TIMEOUT_CONSTANT} seconds. This is", "TIMEOUT_CONSTANT = 180 time_error = f\"Importing the notebook took more than {TIMEOUT_CONSTANT} seconds.", "be found. 
Please don\\'t rename the methods.\" @timeout_decorator.timeout(TIMEOUT_CONSTANT, timeout_exception=TimeoutError, exception_message=time_error) def test_notebook_import(self): if", "% ('generate_ngram_successors')) self.assertIs(structural_helpers.check_for_function('calculate_ngram_freqs', POTUSgen), True, self.method_error % ('calculate_ngram_freqs')) self.assertIs(structural_helpers.check_for_function('next_word_max', POTUSgen), True, self.method_error %", "notebook took more than {TIMEOUT_CONSTANT} seconds. This is longer than expected. Please make", "methods.\" @timeout_decorator.timeout(TIMEOUT_CONSTANT, timeout_exception=TimeoutError, exception_message=time_error) def test_notebook_import(self): if (importFlag is False): raise ImportError(self.import_error) else:", "import POTUSgen importFlag = True except: importFlag = False class TestStructural(unittest.TestCase): TIMEOUT_CONSTANT =", "complex structures.\" import_error = \"There seems to be an error in the provided", "assignment import POTUSgen importFlag = True except: importFlag = False class TestStructural(unittest.TestCase): TIMEOUT_CONSTANT", "error in the provided notebook. Please make sure that every cell is compiling", "pass def test_check_function_names(self): self.assertIs(structural_helpers.check_for_function('generate_ngram_successors', POTUSgen), True, self.method_error % ('generate_ngram_successors')) self.assertIs(structural_helpers.check_for_function('calculate_ngram_freqs', POTUSgen), True, self.method_error", "= \"There seems to be an error in the provided notebook. Please make", "error.\" method_error = \"Function %s could not be found. 
Please don\\'t rename the", "sure that every cell is compiling and prevent complex structures.\" import_error = \"There", "class TestStructural(unittest.TestCase): TIMEOUT_CONSTANT = 180 time_error = f\"Importing the notebook took more than", "cell is compiling without an error.\" method_error = \"Function %s could not be", "\"Function %s could not be found. Please don\\'t rename the methods.\" @timeout_decorator.timeout(TIMEOUT_CONSTANT, timeout_exception=TimeoutError,", "\"There seems to be an error in the provided notebook. Please make sure", "an error in the provided notebook. Please make sure that every cell is", "compiling without an error.\" method_error = \"Function %s could not be found. Please", "method_error = \"Function %s could not be found. Please don\\'t rename the methods.\"", "{TIMEOUT_CONSTANT} seconds. This is longer than expected. Please make sure that every cell", "%s could not be found. Please don\\'t rename the methods.\" @timeout_decorator.timeout(TIMEOUT_CONSTANT, timeout_exception=TimeoutError, exception_message=time_error)", "seconds. This is longer than expected. Please make sure that every cell is", "= \"Function %s could not be found. Please don\\'t rename the methods.\" @timeout_decorator.timeout(TIMEOUT_CONSTANT,", "(importFlag is False): raise ImportError(self.import_error) else: pass def test_check_function_names(self): self.assertIs(structural_helpers.check_for_function('generate_ngram_successors', POTUSgen), True, self.method_error", "than {TIMEOUT_CONSTANT} seconds. This is longer than expected. Please make sure that every", "sure that every cell is compiling without an error.\" method_error = \"Function %s", "make sure that every cell is compiling without an error.\" method_error = \"Function", "compiling and prevent complex structures.\" import_error = \"There seems to be an error", "provided notebook. 
Please make sure that every cell is compiling without an error.\"", "unittest from timeout_decorator import * from behavior.ngram_solutions import * from structural import structural_helpers", "notebook. Please make sure that every cell is compiling without an error.\" method_error", "behavior.ngram_solutions import * from structural import structural_helpers try: from assignment import POTUSgen importFlag", "if (importFlag is False): raise ImportError(self.import_error) else: pass def test_check_function_names(self): self.assertIs(structural_helpers.check_for_function('generate_ngram_successors', POTUSgen), True,", "import * from behavior.ngram_solutions import * from structural import structural_helpers try: from assignment", "longer than expected. Please make sure that every cell is compiling and prevent", "that every cell is compiling without an error.\" method_error = \"Function %s could" ]
[ "used to validate/sign requests handler_func (callable): a function to handle the authz request", "an error to deny the authz request. Args: client_secret (str): a secret string", "to deny the authz request. Args: client_secret (str): a secret string used to", "auth = Auth(client_secret, issuer='authz') server = RestServer(**kwargs) server.add_route('/', AuthzHandler, {'auth': auth, 'func': handler_func})", "AuthzHandler(RestHandler): def initialize(self, func, **kwargs): super(AuthzHandler, self).initialize(**kwargs) self.func = func @authenticated @catch_error async", "authz request. Args: client_secret (str): a secret string used to validate/sign requests handler_func", "to validate/sign requests handler_func (callable): a function to handle the authz request address", "except Exception: raise HTTPError(401, 'denied') if not ret: ret = {} self.write(ret) def", "port \"\"\" auth = Auth(client_secret, issuer='authz') server = RestServer(**kwargs) server.add_route('/', AuthzHandler, {'auth': auth,", "HTTPError from rest_tools.server import (Auth, RestHandler, RestServer, authenticated, catch_error) class AuthzHandler(RestHandler): def initialize(self,", "the token service. This function blocks. Notes on handler_func: This callable should expect", "import inspect from tornado.web import HTTPError from rest_tools.server import (Auth, RestHandler, RestServer, authenticated,", "This function blocks. Notes on handler_func: This callable should expect a dict argument", "deny the authz request. Args: client_secret (str): a secret string used to validate/sign", "if not ret: ret = {} self.write(ret) def run(client_secret, handler_func, address=None, port=None, **kwargs):", "Run an Authz client. Starts a web server that responds to authz requests", "a web server that responds to authz requests from the token service. This", "port=None, **kwargs): \"\"\" Run an Authz client. 
Starts a web server that responds", "issuer='authz') server = RestServer(**kwargs) server.add_route('/', AuthzHandler, {'auth': auth, 'func': handler_func}) startup_args = {}", "function to handle the authz request address (str): bind address port (int): bind", "ret: ret = {} self.write(ret) def run(client_secret, handler_func, address=None, port=None, **kwargs): \"\"\" Run", "authz request address (str): bind address port (int): bind port \"\"\" auth =", "= Auth(client_secret, issuer='authz') server = RestServer(**kwargs) server.add_route('/', AuthzHandler, {'auth': auth, 'func': handler_func}) startup_args", "returned is embedded in the valid token. It should raise an error to", "should raise an error to deny the authz request. Args: client_secret (str): a", "else: ret = self.func(self.auth_data) except Exception: raise HTTPError(401, 'denied') if not ret: ret", "on handler_func: This callable should expect a dict argument with additional data. Any", "get(self): try: if inspect.iscoroutinefunction(self.func): ret = await self.func(self.auth_data) else: ret = self.func(self.auth_data) except", "bind address port (int): bind port \"\"\" auth = Auth(client_secret, issuer='authz') server =", "@authenticated @catch_error async def get(self): try: if inspect.iscoroutinefunction(self.func): ret = await self.func(self.auth_data) else:", "client. Starts a web server that responds to authz requests from the token", "RestServer, authenticated, catch_error) class AuthzHandler(RestHandler): def initialize(self, func, **kwargs): super(AuthzHandler, self).initialize(**kwargs) self.func =", "if inspect.iscoroutinefunction(self.func): ret = await self.func(self.auth_data) else: ret = self.func(self.auth_data) except Exception: raise", "requests from the token service. This function blocks. Notes on handler_func: This callable", "ret = {} self.write(ret) def run(client_secret, handler_func, address=None, port=None, **kwargs): \"\"\" Run an", "with additional data. 
Any information returned is embedded in the valid token. It", "request address (str): bind address port (int): bind port \"\"\" auth = Auth(client_secret,", "inspect from tornado.web import HTTPError from rest_tools.server import (Auth, RestHandler, RestServer, authenticated, catch_error)", "in the valid token. It should raise an error to deny the authz", "await self.func(self.auth_data) else: ret = self.func(self.auth_data) except Exception: raise HTTPError(401, 'denied') if not", "is embedded in the valid token. It should raise an error to deny", "additional data. Any information returned is embedded in the valid token. It should", "that responds to authz requests from the token service. This function blocks. Notes", "Starts a web server that responds to authz requests from the token service.", "{} if address: startup_args['address'] = address if port: startup_args['port'] = port server.startup(**startup_args) loop", "import (Auth, RestHandler, RestServer, authenticated, catch_error) class AuthzHandler(RestHandler): def initialize(self, func, **kwargs): super(AuthzHandler,", "requests handler_func (callable): a function to handle the authz request address (str): bind", "\"\"\" Authz client common code. \"\"\" import asyncio import inspect from tornado.web import", "from rest_tools.server import (Auth, RestHandler, RestServer, authenticated, catch_error) class AuthzHandler(RestHandler): def initialize(self, func,", "embedded in the valid token. It should raise an error to deny the", "self.write(ret) def run(client_secret, handler_func, address=None, port=None, **kwargs): \"\"\" Run an Authz client. Starts", "It should raise an error to deny the authz request. Args: client_secret (str):", "@catch_error async def get(self): try: if inspect.iscoroutinefunction(self.func): ret = await self.func(self.auth_data) else: ret", "**kwargs): \"\"\" Run an Authz client. Starts a web server that responds to", "Authz client common code. 
\"\"\" import asyncio import inspect from tornado.web import HTTPError", "from the token service. This function blocks. Notes on handler_func: This callable should", "argument with additional data. Any information returned is embedded in the valid token.", "Authz client. Starts a web server that responds to authz requests from the", "\"\"\" Run an Authz client. Starts a web server that responds to authz", "initialize(self, func, **kwargs): super(AuthzHandler, self).initialize(**kwargs) self.func = func @authenticated @catch_error async def get(self):", "HTTPError(401, 'denied') if not ret: ret = {} self.write(ret) def run(client_secret, handler_func, address=None,", "async def get(self): try: if inspect.iscoroutinefunction(self.func): ret = await self.func(self.auth_data) else: ret =", "'func': handler_func}) startup_args = {} if address: startup_args['address'] = address if port: startup_args['port']", "func @authenticated @catch_error async def get(self): try: if inspect.iscoroutinefunction(self.func): ret = await self.func(self.auth_data)", "token. It should raise an error to deny the authz request. Args: client_secret", "self).initialize(**kwargs) self.func = func @authenticated @catch_error async def get(self): try: if inspect.iscoroutinefunction(self.func): ret", "Any information returned is embedded in the valid token. It should raise an", "Exception: raise HTTPError(401, 'denied') if not ret: ret = {} self.write(ret) def run(client_secret,", "raise an error to deny the authz request. 
Args: client_secret (str): a secret", "{} self.write(ret) def run(client_secret, handler_func, address=None, port=None, **kwargs): \"\"\" Run an Authz client.", "handler_func (callable): a function to handle the authz request address (str): bind address", "ret = await self.func(self.auth_data) else: ret = self.func(self.auth_data) except Exception: raise HTTPError(401, 'denied')", "if address: startup_args['address'] = address if port: startup_args['port'] = port server.startup(**startup_args) loop =", "rest_tools.server import (Auth, RestHandler, RestServer, authenticated, catch_error) class AuthzHandler(RestHandler): def initialize(self, func, **kwargs):", "an Authz client. Starts a web server that responds to authz requests from", "information returned is embedded in the valid token. It should raise an error", "func, **kwargs): super(AuthzHandler, self).initialize(**kwargs) self.func = func @authenticated @catch_error async def get(self): try:", "'denied') if not ret: ret = {} self.write(ret) def run(client_secret, handler_func, address=None, port=None,", "RestHandler, RestServer, authenticated, catch_error) class AuthzHandler(RestHandler): def initialize(self, func, **kwargs): super(AuthzHandler, self).initialize(**kwargs) self.func", "def get(self): try: if inspect.iscoroutinefunction(self.func): ret = await self.func(self.auth_data) else: ret = self.func(self.auth_data)", "ret = self.func(self.auth_data) except Exception: raise HTTPError(401, 'denied') if not ret: ret =", "import asyncio import inspect from tornado.web import HTTPError from rest_tools.server import (Auth, RestHandler,", "startup_args['address'] = address if port: startup_args['port'] = port server.startup(**startup_args) loop = asyncio.get_event_loop() loop.run_forever()", "from tornado.web import HTTPError from rest_tools.server import (Auth, RestHandler, RestServer, authenticated, catch_error) class", "= RestServer(**kwargs) server.add_route('/', AuthzHandler, {'auth': auth, 'func': 
handler_func}) startup_args = {} if address:", "a secret string used to validate/sign requests handler_func (callable): a function to handle", "authz requests from the token service. This function blocks. Notes on handler_func: This", "valid token. It should raise an error to deny the authz request. Args:", "to authz requests from the token service. This function blocks. Notes on handler_func:", "\"\"\" auth = Auth(client_secret, issuer='authz') server = RestServer(**kwargs) server.add_route('/', AuthzHandler, {'auth': auth, 'func':", "address port (int): bind port \"\"\" auth = Auth(client_secret, issuer='authz') server = RestServer(**kwargs)", "= {} if address: startup_args['address'] = address if port: startup_args['port'] = port server.startup(**startup_args)", "server.add_route('/', AuthzHandler, {'auth': auth, 'func': handler_func}) startup_args = {} if address: startup_args['address'] =", "super(AuthzHandler, self).initialize(**kwargs) self.func = func @authenticated @catch_error async def get(self): try: if inspect.iscoroutinefunction(self.func):", "= func @authenticated @catch_error async def get(self): try: if inspect.iscoroutinefunction(self.func): ret = await", "secret string used to validate/sign requests handler_func (callable): a function to handle the", "raise HTTPError(401, 'denied') if not ret: ret = {} self.write(ret) def run(client_secret, handler_func,", "the authz request. Args: client_secret (str): a secret string used to validate/sign requests", "port (int): bind port \"\"\" auth = Auth(client_secret, issuer='authz') server = RestServer(**kwargs) server.add_route('/',", "catch_error) class AuthzHandler(RestHandler): def initialize(self, func, **kwargs): super(AuthzHandler, self).initialize(**kwargs) self.func = func @authenticated", "run(client_secret, handler_func, address=None, port=None, **kwargs): \"\"\" Run an Authz client. Starts a web", "address=None, port=None, **kwargs): \"\"\" Run an Authz client. 
Starts a web server that", "(str): a secret string used to validate/sign requests handler_func (callable): a function to", "data. Any information returned is embedded in the valid token. It should raise", "AuthzHandler, {'auth': auth, 'func': handler_func}) startup_args = {} if address: startup_args['address'] = address", "def initialize(self, func, **kwargs): super(AuthzHandler, self).initialize(**kwargs) self.func = func @authenticated @catch_error async def", "Notes on handler_func: This callable should expect a dict argument with additional data.", "= address if port: startup_args['port'] = port server.startup(**startup_args) loop = asyncio.get_event_loop() loop.run_forever() server.stop()", "web server that responds to authz requests from the token service. This function", "= {} self.write(ret) def run(client_secret, handler_func, address=None, port=None, **kwargs): \"\"\" Run an Authz", "validate/sign requests handler_func (callable): a function to handle the authz request address (str):", "startup_args = {} if address: startup_args['address'] = address if port: startup_args['port'] = port", "import HTTPError from rest_tools.server import (Auth, RestHandler, RestServer, authenticated, catch_error) class AuthzHandler(RestHandler): def", "**kwargs): super(AuthzHandler, self).initialize(**kwargs) self.func = func @authenticated @catch_error async def get(self): try: if", "(int): bind port \"\"\" auth = Auth(client_secret, issuer='authz') server = RestServer(**kwargs) server.add_route('/', AuthzHandler,", "handler_func, address=None, port=None, **kwargs): \"\"\" Run an Authz client. Starts a web server", "self.func(self.auth_data) else: ret = self.func(self.auth_data) except Exception: raise HTTPError(401, 'denied') if not ret:", "a dict argument with additional data. Any information returned is embedded in the", "should expect a dict argument with additional data. Any information returned is embedded", "expect a dict argument with additional data. 
Any information returned is embedded in", "to handle the authz request address (str): bind address port (int): bind port", "address (str): bind address port (int): bind port \"\"\" auth = Auth(client_secret, issuer='authz')", "Auth(client_secret, issuer='authz') server = RestServer(**kwargs) server.add_route('/', AuthzHandler, {'auth': auth, 'func': handler_func}) startup_args =", "server = RestServer(**kwargs) server.add_route('/', AuthzHandler, {'auth': auth, 'func': handler_func}) startup_args = {} if", "(callable): a function to handle the authz request address (str): bind address port", "handler_func: This callable should expect a dict argument with additional data. Any information", "= self.func(self.auth_data) except Exception: raise HTTPError(401, 'denied') if not ret: ret = {}", "not ret: ret = {} self.write(ret) def run(client_secret, handler_func, address=None, port=None, **kwargs): \"\"\"", "callable should expect a dict argument with additional data. Any information returned is", "server that responds to authz requests from the token service. This function blocks.", "This callable should expect a dict argument with additional data. Any information returned", "try: if inspect.iscoroutinefunction(self.func): ret = await self.func(self.auth_data) else: ret = self.func(self.auth_data) except Exception:", "bind port \"\"\" auth = Auth(client_secret, issuer='authz') server = RestServer(**kwargs) server.add_route('/', AuthzHandler, {'auth':", "handle the authz request address (str): bind address port (int): bind port \"\"\"", "authenticated, catch_error) class AuthzHandler(RestHandler): def initialize(self, func, **kwargs): super(AuthzHandler, self).initialize(**kwargs) self.func = func", "asyncio import inspect from tornado.web import HTTPError from rest_tools.server import (Auth, RestHandler, RestServer,", "blocks. 
Notes on handler_func: This callable should expect a dict argument with additional", "tornado.web import HTTPError from rest_tools.server import (Auth, RestHandler, RestServer, authenticated, catch_error) class AuthzHandler(RestHandler):", "common code. \"\"\" import asyncio import inspect from tornado.web import HTTPError from rest_tools.server", "self.func(self.auth_data) except Exception: raise HTTPError(401, 'denied') if not ret: ret = {} self.write(ret)", "responds to authz requests from the token service. This function blocks. Notes on", "{'auth': auth, 'func': handler_func}) startup_args = {} if address: startup_args['address'] = address if", "client common code. \"\"\" import asyncio import inspect from tornado.web import HTTPError from", "(str): bind address port (int): bind port \"\"\" auth = Auth(client_secret, issuer='authz') server", "string used to validate/sign requests handler_func (callable): a function to handle the authz", "def run(client_secret, handler_func, address=None, port=None, **kwargs): \"\"\" Run an Authz client. Starts a", "RestServer(**kwargs) server.add_route('/', AuthzHandler, {'auth': auth, 'func': handler_func}) startup_args = {} if address: startup_args['address']", "class AuthzHandler(RestHandler): def initialize(self, func, **kwargs): super(AuthzHandler, self).initialize(**kwargs) self.func = func @authenticated @catch_error", "handler_func}) startup_args = {} if address: startup_args['address'] = address if port: startup_args['port'] =", "the authz request address (str): bind address port (int): bind port \"\"\" auth", "<reponame>WIPACrepo/token-service \"\"\" Authz client common code. \"\"\" import asyncio import inspect from tornado.web", "self.func = func @authenticated @catch_error async def get(self): try: if inspect.iscoroutinefunction(self.func): ret =", "dict argument with additional data. Any information returned is embedded in the valid", "request. 
Args: client_secret (str): a secret string used to validate/sign requests handler_func (callable):", "(Auth, RestHandler, RestServer, authenticated, catch_error) class AuthzHandler(RestHandler): def initialize(self, func, **kwargs): super(AuthzHandler, self).initialize(**kwargs)", "inspect.iscoroutinefunction(self.func): ret = await self.func(self.auth_data) else: ret = self.func(self.auth_data) except Exception: raise HTTPError(401,", "Args: client_secret (str): a secret string used to validate/sign requests handler_func (callable): a", "code. \"\"\" import asyncio import inspect from tornado.web import HTTPError from rest_tools.server import", "address: startup_args['address'] = address if port: startup_args['port'] = port server.startup(**startup_args) loop = asyncio.get_event_loop()", "error to deny the authz request. Args: client_secret (str): a secret string used", "the valid token. It should raise an error to deny the authz request.", "service. This function blocks. Notes on handler_func: This callable should expect a dict", "= await self.func(self.auth_data) else: ret = self.func(self.auth_data) except Exception: raise HTTPError(401, 'denied') if", "token service. This function blocks. Notes on handler_func: This callable should expect a", "client_secret (str): a secret string used to validate/sign requests handler_func (callable): a function", "\"\"\" import asyncio import inspect from tornado.web import HTTPError from rest_tools.server import (Auth,", "function blocks. Notes on handler_func: This callable should expect a dict argument with", "a function to handle the authz request address (str): bind address port (int):", "auth, 'func': handler_func}) startup_args = {} if address: startup_args['address'] = address if port:" ]
[ "ctx.send(\"No number for name >:T\") else: if(not name.isdecimal()): self.initiatives[game].append(name) await ctx.send(\"Successfully added player!\")", "and roles for a game.\", description=\"/creategame [arg1] [arg2] @member\\n\\n- arg1 = Game Name/Campaign\\n-", "#More Stuff await msg.delete() await ctx.send(\"Deletion Aborted!\") else: await ctx.send(\"That isn't right...\") except", "the current initiative for a game that can be used as a reminder.\")", "= await guild.create_role(name=str(arg1), mentionable=True) await role.edit(position=pos) await gm.add_roles(role) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False,", "category.create_text_channel(str(arg2) + \" house rules\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc basics\", overwrites=overwrites)", "if(len(args) != 0): if(str(args).isdecimal()): await ctx.send(\"You can't have just a number for a", "for a game that can be used as a reminder.\", description=\"/initiative [args]\\n\\n- args", "\" pc sheets\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc visuals\", overwrites=overwrites) await category.create_text_channel(str(arg2)", "else: game = ctx.channel.category_id msg = \"```Initiative:\\n\" counter = 1 for arg in", "if(not name.isdecimal()): self.initiatives[game].insert(int(idx)-1, name) await ctx.send(\"Successfully added player!\") else: await ctx.send(\"No number for", "class GameManager(): def __init__(self): self.setup() def setup(self): print(\"GameManager: Loaded\") class GameManagerCog(commands.Cog): def __init__(self,", "in the server @commands.command(aliases=['Creategame','CreateGame','cg','Cg','cG','CG','gamecreate','Gamecreate','GameCreate','gc','Gc','gC','GC'],brief=\"Makes the necessary channels and roles for a game.\", description=\"/creategame", 
"attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=True,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } await category.create_text_channel(str(arg2) +", "for a game.\") @commands.has_role(\"Mod\") async def deletegame(self, ctx, arg1=None): if(arg1 != None): #", "indicate order of initiative\\n\\nAllows you to set the current initiative for a game", "a reminder.\", description=\"/initiative [args]\\n\\n- args = Names separated by spaces to indicate order", "list the player will go (optional).\\n\\nAdds a player to the initiative.') async def", "def check(reaction, user): return user == ctx.author try: reaction = await self.client.wait_for('reaction_add', timeout=60.0,", "arguments!\") @commands.command(aliases=['Initiative','init','Init','i','I','initiate','Initiate'],brief=\"Allows you to set the current initiative for a game that can", "overwrites=overwrites) await progress_msg.delete() await ctx.send(\"Done!\") else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief=\"Deletes the appropriate channels", "await category.create_text_channel(str(arg2) + \" music\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" dice rolls\", overwrites=overwrites)", "ctx.channel.category_id if(idx != None): if(not name.isdecimal()): 
self.initiatives[game].insert(int(idx)-1, name) await ctx.send(\"Successfully added player!\") else:", "overwrites=overwrites) await category.create_text_channel(str(arg2) + \" dice rolls\", overwrites=overwrites) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False,", "msg = \"```Initiative:\\n\" counter = 1 for arg in self.initiatives[game]: msg += \"{})", "to the initiative.') async def addplayer(self, ctx, name:str, idx=None): game = ctx.channel.category_id if(idx", "= Game Name/Campaign\\n\\nDeletes the appropriate channels and roles for a game.\") @commands.has_role(\"Mod\") async", "name.isdecimal()): self.initiatives[game].insert(int(idx)-1, name) await ctx.send(\"Successfully added player!\") else: await ctx.send(\"No number for name", "initiative.', description=\"/removeplayer [arg]\\n\\n- arg = The index or name of the player you'd", "category.create_text_channel(str(arg2) + \" dice rolls\", overwrites=overwrites) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False),", "channel) # t-voice-chat (text channel) # T Sessions (voice channel) # Makes a", "discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } category = await guild.create_category_channel(str(arg1)) await category.create_text_channel(str(arg2) + \" session", "attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } category = await guild.create_category_channel(str(arg1)) await category.create_text_channel(str(arg2) + \" session planning\", overwrites=overwrites)", "session planning\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" notes\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \"", "sheets\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc visuals\", overwrites=overwrites) await category.create_text_channel(str(arg2) + 
\"", "want to delete \" + str(arg1) + \"?\") await msg.add_reaction(\"✅\") await msg.add_reaction(\"❌\") def", "(text channel) # t-pc-sheets (text channel) # t-pc-visuals (text channel) # t-music (text", "self.initiatives[game]: msg += \"{}) {}\\n\".format(counter, arg) counter+=1 msg += \"```\" # print(self.initiatives[game]) await", "{⭒|PERSONAL|⭒} ───── ⊰\").position +2 member = discord.utils.get(ctx.guild.roles, name=\"Member\") role = await guild.create_role(name=str(arg1), mentionable=True)", "def __init__(self): self.setup() def setup(self): print(\"GameManager: Loaded\") class GameManagerCog(commands.Cog): def __init__(self, client): self.client", "(text channel) # t-dice-rolls (text channel) # t-voice-chat (text channel) # T Sessions", "asyncio class GameManager(): def __init__(self): self.setup() def setup(self): print(\"GameManager: Loaded\") class GameManagerCog(commands.Cog): def", "def creategame(self, ctx, arg1=None, arg2=None, gm: discord.Member = None): if(arg1 != None and", "== ctx.author try: reaction = await self.client.wait_for('reaction_add', timeout=60.0, check=check) if(str(reaction[0]) == '✅'): #", "discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, 
administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=None,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None,", "ctx.channel.category_id self.initiatives[game] = [arg for arg in args] await ctx.send(\"Initiative saved!\") else: game", "name) await ctx.send(\"Successfully added player!\") else: await ctx.send(\"No number for name >:T\") else:", "client self.initiatives = {} self.gamemanager = GameManager() # Official Format: # Test (category", "server 
@commands.command(aliases=['Creategame','CreateGame','cg','Cg','cG','CG','gamecreate','Gamecreate','GameCreate','gc','Gc','gC','GC'],brief=\"Makes the necessary channels and roles for a game.\", description=\"/creategame [arg1] [arg2]", "await ctx.send(\"Successfully added player!\") else: await ctx.send(\"No number for name >:T\") else: if(not", "index or name of the player you'd like to remove from initiative.\\n\\nRemoves a", "and roles for a game.\", description=\"/deletegame [arg]\\n\\n- arg = Game Name/Campaign\\n\\nDeletes the appropriate", "def removeplayer(self, ctx, arg): game = ctx.channel.category_id if(str(arg).isdecimal()): del self.initiatives[game][int(arg)-1] await ctx.send(\"Successfully removed", "discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } await category.create_text_channel(str(arg2) + \" voice chat\", overwrites=overwrites) await category.create_voice_channel(str(arg2).upper()", "guild.create_role(name=str(arg1), mentionable=True) await role.edit(position=pos) await gm.add_roles(role) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, 
attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False),", "{ guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=True,view_guild_insights=None),", "= discord.utils.get(ctx.guild.roles, name=str(arg1)) await role.delete() category = self.client.get_channel(channel.id) for channel in category.channels: await", "a game.\") @commands.has_role(\"Mod\") async def creategame(self, ctx, arg1=None, arg2=None, gm: discord.Member = None):", "if(idx != None): if(not name.isdecimal()): self.initiatives[game].insert(int(idx)-1, name) await ctx.send(\"Successfully added player!\") else: await", "(category channel) # t-session-planning (text channel) # t-notes (text-channel) # t-stars-and-wishes 
(text channel)", "administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, 
attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=None,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None,", "ctx.send(\"Done!\") else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief=\"Deletes the appropriate channels and roles for a", "in args] await ctx.send(\"Initiative saved!\") else: game = ctx.channel.category_id msg = \"```Initiative:\\n\" counter", "T Sessions (voice channel) # Makes a game (category, channel, role, etc) in", "game.\") @commands.has_role(\"Mod\") async def deletegame(self, ctx, arg1=None): if(arg1 != None): # Stuff msg", "Game Name Abbreviation\\n- @member = Game Master\\n\\nMakes the necessary channels and roles for", "!= None): # Stuff msg = await ctx.send(\"Are you sure you want to", "counter = 1 for arg in self.initiatives[game]: msg += \"{}) {}\\n\".format(counter, arg) counter+=1", "for name >:T\") else: if(not name.isdecimal()): self.initiatives[game].append(name) await ctx.send(\"Successfully added player!\") else: await", "Name Abbreviation\\n- @member = Game Master\\n\\nMakes the necessary channels and roles for a", "appropriate channels and roles for a game.\") @commands.has_role(\"Mod\") async def deletegame(self, ctx, arg1=None):", ":(\") else: game = 
ctx.channel.category_id self.initiatives[game] = [arg for arg in args] await", "await ctx.send(\"Initiative saved!\") else: game = ctx.channel.category_id msg = \"```Initiative:\\n\" counter = 1", "ctx.send(\"Initiative saved!\") else: game = ctx.channel.category_id msg = \"```Initiative:\\n\" counter = 1 for", "(optional).\\n\\nAdds a player to the initiative.') async def addplayer(self, ctx, name:str, idx=None): game", "like to remove from initiative.\\n\\nRemoves a player from the initiative.\") async def removeplayer(self,", "[arg1] [arg2] @member\\n\\n- arg1 = Game Name/Campaign\\n- arg2 = Game Name Abbreviation\\n- @member", "necessary channels and roles for a game.\", description=\"/creategame [arg1] [arg2] @member\\n\\n- arg1 =", "\" pc visuals\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" music\", overwrites=overwrites) await category.create_text_channel(str(arg2) +", "\" pc basics\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc sheets\", overwrites=overwrites) await category.create_text_channel(str(arg2)", "\" + str(arg1) + \"?\") await msg.add_reaction(\"✅\") await msg.add_reaction(\"❌\") def check(reaction, user): return", "overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, 
attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True,", "roles for a game.\", description=\"/deletegame [arg]\\n\\n- arg = Game Name/Campaign\\n\\nDeletes the appropriate channels", "await role.delete() category = self.client.get_channel(channel.id) for channel in category.channels: await channel.delete() await category.delete()", "of initiative\\n\\nAllows you to set the current initiative for a game that can", "await category.create_text_channel(str(arg2) + \" star and wishes\", overwrites=overwrites) 
await category.create_text_channel(str(arg2) + \" house", "!= None): if(not name.isdecimal()): self.initiatives[game].insert(int(idx)-1, name) await ctx.send(\"Successfully added player!\") else: await ctx.send(\"No", "a number for a name, sorry :(\") else: game = ctx.channel.category_id self.initiatives[game] =", "role.edit(position=pos) await gm.add_roles(role) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None,", "channel) # Makes a game (category, channel, role, etc) in the server @commands.command(aliases=['Creategame','CreateGame','cg','Cg','cG','CG','gamecreate','Gamecreate','GameCreate','gc','Gc','gC','GC'],brief=\"Makes", "a player from the initiative.\") async def removeplayer(self, ctx, arg): game = ctx.channel.category_id", "ctx.send(\"Successfully deleted!\") elif(str(reaction[0]) == '❌'): #More Stuff await msg.delete() await ctx.send(\"Deletion Aborted!\") else:", "member: discord.PermissionOverwrite(add_reactions=True, administrator=None, 
attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=True,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } await category.create_text_channel(str(arg2) + \"", "name=str(arg1)) await role.delete() category = self.client.get_channel(channel.id) for channel in category.channels: await channel.delete() await", "== '❌'): #More Stuff await msg.delete() await ctx.send(\"Deletion Aborted!\") else: await ctx.send(\"That isn't", "role: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=True,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) }", "initiative.', description='/addplayer [name] [idx]\\n\\n- name = The name of the player you are", "await msg.delete() await ctx.send(\"Timed out!\") else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Initiative','init','Init','i','I','initiate','Initiate'],brief=\"Allows you to set", "out!\") else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Initiative','init','Init','i','I','initiate','Initiate'],brief=\"Allows you to set the current initiative for", "(text-channel) # t-stars-and-wishes (text channel) # t-pc-basics (text channel) # t-pc-sheets (text channel)", "role = discord.utils.get(ctx.guild.roles, name=str(arg1)) await role.delete() category = self.client.get_channel(channel.id) for channel in category.channels:", "async def creategame(self, ctx, arg1=None, arg2=None, gm: discord.Member = None): if(arg1 != None", "you'd like to remove from initiative.\\n\\nRemoves a player from the initiative.\") async def", "await category.create_text_channel(str(arg2) + \" session planning\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" notes\", overwrites=overwrites)", "msg.delete() await ctx.send(\"Timed out!\") else: await ctx.send(\"Missing arguments!\") 
@commands.command(aliases=['Initiative','init','Init','i','I','initiate','Initiate'],brief=\"Allows you to set the", "\" session planning\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" notes\", overwrites=overwrites) await category.create_text_channel(str(arg2) +", "for channel in category.channels: await channel.delete() await category.delete() await msg.delete() await ctx.send(\"Successfully deleted!\")", "player to the initiative.') async def addplayer(self, ctx, name:str, idx=None): game = ctx.channel.category_id", "else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief=\"Deletes the appropriate channels and roles for a game.\",", "@commands.has_role(\"Mod\") async def creategame(self, ctx, arg1=None, arg2=None, gm: discord.Member = None): if(arg1 !=", "mentionable=True) await role.edit(position=pos) await gm.add_roles(role) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role:", "description=\"/removeplayer [arg]\\n\\n- arg = The index or name of the player you'd like", "a reminder.\") async def initiative(self, ctx, *args): if(len(args) != 
0): if(str(args).isdecimal()): await ctx.send(\"You", "Name/Campaign\\n- arg2 = Game Name Abbreviation\\n- @member = Game Master\\n\\nMakes the necessary channels", "and roles for a game.\") @commands.has_role(\"Mod\") async def creategame(self, ctx, arg1=None, arg2=None, gm:", "overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc visuals\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" music\",", "= Game Name/Campaign\\n- arg2 = Game Name Abbreviation\\n- @member = Game Master\\n\\nMakes the", "category.create_text_channel(str(arg2) + \" star and wishes\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" house rules\",", "reaction = await self.client.wait_for('reaction_add', timeout=60.0, check=check) if(str(reaction[0]) == '✅'): # Stuff channel =", "for arg in self.initiatives[game]: msg += \"{}) {}\\n\".format(counter, arg) counter+=1 msg += \"```\"", "elif(str(reaction[0]) == '❌'): #More Stuff await msg.delete() await ctx.send(\"Deletion Aborted!\") else: await ctx.send(\"That", "of the player you are adding to the initiative\\n- idx = Where in", "discord.utils.get(ctx.guild.roles, name=\"⊱ ───── {⭒|PERSONAL|⭒} ───── ⊰\").position +2 member = discord.utils.get(ctx.guild.roles, name=\"Member\") role =", "reminder.\", description=\"/initiative [args]\\n\\n- args = Names separated by spaces to indicate order of", "# T Sessions (voice channel) # Makes a game (category, channel, role, etc)", "args = Names separated by spaces to indicate order of initiative\\n\\nAllows you to", "current initiative for a game that can be used as a reminder.\") async", "\" notes\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" star and wishes\", overwrites=overwrites) await category.create_text_channel(str(arg2)", "asyncio.TimeoutError: await msg.delete() await ctx.send(\"Timed out!\") else: await ctx.send(\"Missing arguments!\") 
@commands.command(aliases=['Initiative','init','Init','i','I','initiate','Initiate'],brief=\"Allows you to", "await msg.delete() await ctx.send(\"Successfully deleted!\") elif(str(reaction[0]) == '❌'): #More Stuff await msg.delete() await", "discord.utils.get(ctx.guild.roles, name=str(arg1)) await role.delete() category = self.client.get_channel(channel.id) for channel in category.channels: await channel.delete()", "= self.client.get_channel(channel.id) for channel in category.channels: await channel.delete() await category.delete() await msg.delete() await", "player you are adding to the initiative\\n- idx = Where in the list", "Stuff msg = await ctx.send(\"Are you sure you want to delete \" +", "= { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None,", "def __init__(self, client): self.client = client self.initiatives = {} self.gamemanager = GameManager() #", "gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } await category.create_text_channel(str(arg2) + \" voice chat\", overwrites=overwrites) await", "def setup(self): print(\"GameManager: Loaded\") class GameManagerCog(commands.Cog): def __init__(self, client): self.client = client self.initiatives", "as a reminder.\", description=\"/initiative [args]\\n\\n- args = Names separated by spaces to indicate", "if(arg1 
!= None): # Stuff msg = await ctx.send(\"Are you sure you want", "the server @commands.command(aliases=['Creategame','CreateGame','cg','Cg','cG','CG','gamecreate','Gamecreate','GameCreate','gc','Gc','gC','GC'],brief=\"Makes the necessary channels and roles for a game.\", description=\"/creategame [arg1]", "music\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" dice rolls\", overwrites=overwrites) overwrites = { guild.default_role:", "'❌'): #More Stuff await msg.delete() await ctx.send(\"Deletion Aborted!\") else: await ctx.send(\"That isn't right...\")", "= discord.utils.get(ctx.guild.roles, name=\"Member\") role = await guild.create_role(name=str(arg1), mentionable=True) await role.edit(position=pos) await gm.add_roles(role) overwrites", "deleted!\") elif(str(reaction[0]) == '❌'): #More Stuff await msg.delete() await ctx.send(\"Deletion Aborted!\") else: await", "initiative for a game that can be used as a reminder.\") async def", "attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } await category.create_text_channel(str(arg2) + \" voice chat\", overwrites=overwrites) await category.create_voice_channel(str(arg2).upper() + \"", "\"```Initiative:\\n\" counter = 1 for arg in self.initiatives[game]: msg += \"{}) {}\\n\".format(counter, arg)", "\" house rules\", overwrites=overwrites) await 
category.create_text_channel(str(arg2) + \" pc basics\", overwrites=overwrites) await category.create_text_channel(str(arg2)", "category.create_text_channel(str(arg2) + \" pc basics\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc sheets\", overwrites=overwrites)", "{ guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, 
attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=None,view_guild_insights=None),", "channel.delete() await category.delete() await msg.delete() await ctx.send(\"Successfully deleted!\") elif(str(reaction[0]) == '❌'): #More Stuff", "for a game.\", description=\"/creategame [arg1] [arg2] @member\\n\\n- arg1 = Game Name/Campaign\\n- arg2 =", "name, sorry :(\") else: game = ctx.channel.category_id self.initiatives[game] = [arg for arg in", "rules\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc basics\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \"", "player from the initiative.\") async def removeplayer(self, ctx, arg): game = ctx.channel.category_id if(str(arg).isdecimal()):", "appropriate channels and roles for a game.\", description=\"/deletegame [arg]\\n\\n- arg = Game Name/Campaign\\n\\nDeletes", "spaces to indicate order of initiative\\n\\nAllows you to set the current initiative for", "gm.add_roles(role) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, 
attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member:", "check(reaction, user): return user == ctx.author try: reaction = await self.client.wait_for('reaction_add', timeout=60.0, check=check)", "game.\") @commands.has_role(\"Mod\") async def creategame(self, ctx, arg1=None, arg2=None, gm: discord.Member = None): if(arg1", "category.create_text_channel(str(arg2) + \" pc visuals\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" music\", overwrites=overwrites) await", "Official Format: # Test (category channel) # t-session-planning 
(text channel) # t-notes (text-channel)", "[arg for arg in args] await ctx.send(\"Initiative saved!\") else: game = ctx.channel.category_id msg", "arg1=None, arg2=None, gm: discord.Member = None): if(arg1 != None and arg2 != None", "Sessions\", overwrites=overwrites) await progress_msg.delete() await ctx.send(\"Done!\") else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief=\"Deletes the appropriate", "await ctx.send(\"Done!\") else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief=\"Deletes the appropriate channels and roles for", "# t-music (text channel) # t-dice-rolls (text channel) # t-voice-chat (text channel) #", "saved!\") else: game = ctx.channel.category_id msg = \"```Initiative:\\n\" counter = 1 for arg", "discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=True,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None,", "arg in self.initiatives[game]: msg += \"{}) {}\\n\".format(counter, arg) counter+=1 msg += \"```\" #", "just a number for a name, sorry :(\") else: game = ctx.channel.category_id self.initiatives[game]", "None): if(not name.isdecimal()): self.initiatives[game].insert(int(idx)-1, name) await ctx.send(\"Successfully added player!\") else: await ctx.send(\"No number", "a game (category, channel, role, etc) in the server 
@commands.command(aliases=['Creategame','CreateGame','cg','Cg','cG','CG','gamecreate','Gamecreate','GameCreate','gc','Gc','gC','GC'],brief=\"Makes the necessary channels", "@commands.command(aliases=['Creategame','CreateGame','cg','Cg','cG','CG','gamecreate','Gamecreate','GameCreate','gc','Gc','gC','GC'],brief=\"Makes the necessary channels and roles for a game.\", description=\"/creategame [arg1] [arg2] @member\\n\\n-", "Names separated by spaces to indicate order of initiative\\n\\nAllows you to set the", "channel) # t-pc-sheets (text channel) # t-pc-visuals (text channel) # t-music (text channel)", "else: await ctx.send(\"No number for name >:T\") else: if(not name.isdecimal()): self.initiatives[game].append(name) await ctx.send(\"Successfully", "t-pc-sheets (text channel) # t-pc-visuals (text channel) # t-music (text channel) # t-dice-rolls", "# Stuff guild = ctx.guild progress_msg = await ctx.send(\"Making...\") pos = discord.utils.get(ctx.guild.roles, name=\"⊱", "[arg2] @member\\n\\n- arg1 = Game Name/Campaign\\n- arg2 = Game Name Abbreviation\\n- @member =", "initiative.') async def addplayer(self, ctx, name:str, idx=None): game = ctx.channel.category_id if(idx != None):", "t-voice-chat (text channel) # T Sessions (voice channel) # Makes a game (category,", "of the player you'd like to remove from initiative.\\n\\nRemoves a player from the", "self.initiatives[game][int(arg)-1] await ctx.send(\"Successfully removed player!\") else: del self.initiatives[game][self.initiatives[game].index(str(arg))] await ctx.send(\"Successfully removed player!\") def", "+ \" house rules\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc basics\", overwrites=overwrites) await", "counter+=1 msg += \"```\" # print(self.initiatives[game]) await ctx.send(msg) @commands.command(aliases=['Addplayer','AddPlayer','initadd','Initadd','InitAdd'],brief='Adds a player to the", "client): self.client = client self.initiatives = {} self.gamemanager = GameManager() 
# Official Format:", "async def initiative(self, ctx, *args): if(len(args) != 0): if(str(args).isdecimal()): await ctx.send(\"You can't have", "from the initiative.', description=\"/removeplayer [arg]\\n\\n- arg = The index or name of the", "pc sheets\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc visuals\", overwrites=overwrites) await category.create_text_channel(str(arg2) +", "a name, sorry :(\") else: game = ctx.channel.category_id self.initiatives[game] = [arg for arg", "player!\") else: await ctx.send(\"No number for name! >:T\") @commands.command(aliases=['Removeplayer','RemovePlayer','initdel','Initdel','InitDel'],brief='Removes a player from the", "await guild.create_role(name=str(arg1), mentionable=True) await role.edit(position=pos) await gm.add_roles(role) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False,", "ctx, arg1=None, arg2=None, gm: discord.Member = None): if(arg1 != None and arg2 !=", "msg.delete() await ctx.send(\"Successfully deleted!\") elif(str(reaction[0]) == '❌'): #More Stuff await msg.delete() await ctx.send(\"Deletion", "category = self.client.get_channel(channel.id) for channel in category.channels: await channel.delete() await category.delete() await msg.delete()", "roles for a game.\") @commands.has_role(\"Mod\") async def deletegame(self, ctx, arg1=None): if(arg1 != None):", "the initiative.', description=\"/removeplayer [arg]\\n\\n- arg = The index or name of the player", "the initiative.\") async def removeplayer(self, ctx, arg): game = ctx.channel.category_id if(str(arg).isdecimal()): del self.initiatives[game][int(arg)-1]", "Sessions (voice channel) # Makes a game (category, channel, role, etc) in the", "arg in args] await ctx.send(\"Initiative saved!\") else: game = ctx.channel.category_id msg = \"```Initiative:\\n\"", "role, etc) in the server 
@commands.command(aliases=['Creategame','CreateGame','cg','Cg','cG','CG','gamecreate','Gamecreate','GameCreate','gc','Gc','gC','GC'],brief=\"Makes the necessary channels and roles for a", "guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, 
attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=True,view_guild_insights=None), gm:", "= ctx.guild progress_msg = await ctx.send(\"Making...\") pos = discord.utils.get(ctx.guild.roles, name=\"⊱ ───── {⭒|PERSONAL|⭒} ─────", "category.channels: await channel.delete() await category.delete() await msg.delete() await ctx.send(\"Successfully deleted!\") elif(str(reaction[0]) == '❌'):", "# t-notes (text-channel) # t-stars-and-wishes (text channel) # t-pc-basics (text channel) # t-pc-sheets", "ctx, name:str, idx=None): game = ctx.channel.category_id if(idx != None): if(not name.isdecimal()): self.initiatives[game].insert(int(idx)-1, name)", "del self.initiatives[game][int(arg)-1] await ctx.send(\"Successfully removed player!\") else: del self.initiatives[game][self.initiatives[game].index(str(arg))] await ctx.send(\"Successfully removed player!\")", "None and arg2 != None and gm != None): # Stuff guild =", "channel in category.channels: await channel.delete() await category.delete() await msg.delete() await ctx.send(\"Successfully deleted!\") elif(str(reaction[0])", "await role.edit(position=pos) await gm.add_roles(role) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, 
attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None,", "right...\") except asyncio.TimeoutError: await msg.delete() await ctx.send(\"Timed out!\") else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Initiative','init','Init','i','I','initiate','Initiate'],brief=\"Allows", "attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, 
attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=None,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } category = await", "== '✅'): # Stuff channel = discord.utils.get(ctx.guild.channels, name=str(arg1)) role = discord.utils.get(ctx.guild.roles, name=str(arg1)) await", "for a game.\") @commands.has_role(\"Mod\") async def creategame(self, ctx, arg1=None, arg2=None, gm: discord.Member =", "GameManager(): def __init__(self): self.setup() def setup(self): print(\"GameManager: Loaded\") class GameManagerCog(commands.Cog): def __init__(self, client):", "channel, role, etc) in the server 
@commands.command(aliases=['Creategame','CreateGame','cg','Cg','cG','CG','gamecreate','Gamecreate','GameCreate','gc','Gc','gC','GC'],brief=\"Makes the necessary channels and roles for", "= The index or name of the player you'd like to remove from", "category.create_text_channel(str(arg2) + \" voice chat\", overwrites=overwrites) await category.create_voice_channel(str(arg2).upper() + \" Sessions\", overwrites=overwrites) await", "you sure you want to delete \" + str(arg1) + \"?\") await msg.add_reaction(\"✅\")", "def deletegame(self, ctx, arg1=None): if(arg1 != None): # Stuff msg = await ctx.send(\"Are", "idx=None): game = ctx.channel.category_id if(idx != None): if(not name.isdecimal()): self.initiatives[game].insert(int(idx)-1, name) await ctx.send(\"Successfully", "for arg in args] await ctx.send(\"Initiative saved!\") else: game = ctx.channel.category_id msg =", "(text channel) # t-pc-visuals (text channel) # t-music (text channel) # t-dice-rolls (text", "a player to the initiative.') async def addplayer(self, ctx, name:str, idx=None): game =", "\" Sessions\", overwrites=overwrites) await progress_msg.delete() await ctx.send(\"Done!\") else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief=\"Deletes the", "the appropriate channels and roles for a game.\", description=\"/deletegame [arg]\\n\\n- arg = Game", "player to the initiative.', description='/addplayer [name] [idx]\\n\\n- name = The name of the", "t-pc-visuals (text channel) # t-music (text channel) # t-dice-rolls (text channel) # t-voice-chat", "the player will go (optional).\\n\\nAdds a player to the initiative.') async def addplayer(self,", "etc) in the server @commands.command(aliases=['Creategame','CreateGame','cg','Cg','cG','CG','gamecreate','Gamecreate','GameCreate','gc','Gc','gC','GC'],brief=\"Makes the necessary channels and roles for a game.\",", "Makes a game 
(category, channel, role, etc) in the server @commands.command(aliases=['Creategame','CreateGame','cg','Cg','cG','CG','gamecreate','Gamecreate','GameCreate','gc','Gc','gC','GC'],brief=\"Makes the necessary", "notes\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" star and wishes\", overwrites=overwrites) await category.create_text_channel(str(arg2) +", "!= None): # Stuff guild = ctx.guild progress_msg = await ctx.send(\"Making...\") pos =", "a game.\", description=\"/deletegame [arg]\\n\\n- arg = Game Name/Campaign\\n\\nDeletes the appropriate channels and roles", "overwrites=overwrites) await category.create_text_channel(str(arg2) + \" notes\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" star and", "= discord.utils.get(ctx.guild.roles, name=\"⊱ ───── {⭒|PERSONAL|⭒} ───── ⊰\").position +2 member = discord.utils.get(ctx.guild.roles, name=\"Member\") role", "await progress_msg.delete() await ctx.send(\"Done!\") else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief=\"Deletes the appropriate channels and", "gm != None): # Stuff guild = ctx.guild progress_msg = await ctx.send(\"Making...\") pos", "ctx.send(\"No number for name! 
>:T\") @commands.command(aliases=['Removeplayer','RemovePlayer','initdel','Initdel','InitDel'],brief='Removes a player from the initiative.', description=\"/removeplayer [arg]\\n\\n-", "(voice channel) # Makes a game (category, channel, role, etc) in the server", "await category.create_text_channel(str(arg2) + \" pc basics\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc sheets\",", "(text channel) # T Sessions (voice channel) # Makes a game (category, channel,", "= None): if(arg1 != None and arg2 != None and gm != None):", "name >:T\") else: if(not name.isdecimal()): self.initiatives[game].append(name) await ctx.send(\"Successfully added player!\") else: await ctx.send(\"No", "await gm.add_roles(role) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None),", "be used as a reminder.\", description=\"/initiative [args]\\n\\n- args = Names separated by spaces", "Master\\n\\nMakes the necessary channels and roles for a game.\") @commands.has_role(\"Mod\") async def creategame(self,", "@member = Game Master\\n\\nMakes the necessary channels and roles for a game.\") @commands.has_role(\"Mod\")", "None): # Stuff msg = await ctx.send(\"Are you sure you want to delete", "pc basics\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc sheets\", overwrites=overwrites) await category.create_text_channel(str(arg2) +", "the initiative.', description='/addplayer [name] [idx]\\n\\n- name = The name of the player you", "channel) # T Sessions (voice channel) # Makes a game (category, channel, role,", "⊰\").position +2 member = discord.utils.get(ctx.guild.roles, name=\"Member\") role = await guild.create_role(name=str(arg1), mentionable=True) await role.edit(position=pos)", "self.initiatives[game] = [arg for arg in args] await ctx.send(\"Initiative saved!\") else: game =", "# print(self.initiatives[game]) await ctx.send(msg) @commands.command(aliases=['Addplayer','AddPlayer','initadd','Initadd','InitAdd'],brief='Adds a player to the initiative.', description='/addplayer [name] [idx]\\n\\n-", "reminder.\") async def initiative(self, ctx, 
*args): if(len(args) != 0): if(str(args).isdecimal()): await ctx.send(\"You can't", "and gm != None): # Stuff guild = ctx.guild progress_msg = await ctx.send(\"Making...\")", "= await ctx.send(\"Making...\") pos = discord.utils.get(ctx.guild.roles, name=\"⊱ ───── {⭒|PERSONAL|⭒} ───── ⊰\").position +2 member", "self.initiatives[game].insert(int(idx)-1, name) await ctx.send(\"Successfully added player!\") else: await ctx.send(\"No number for name >:T\")", "initiative for a game that can be used as a reminder.\", description=\"/initiative [args]\\n\\n-", "# t-pc-sheets (text channel) # t-pc-visuals (text channel) # t-music (text channel) #", "if(not name.isdecimal()): self.initiatives[game].append(name) await ctx.send(\"Successfully added player!\") else: await ctx.send(\"No number for name!", "Name/Campaign\\n\\nDeletes the appropriate channels and roles for a game.\") @commands.has_role(\"Mod\") async def deletegame(self,", "member: discord.PermissionOverwrite(add_reactions=True, administrator=None, attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=None,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
@commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief="Deletes the appropriate channels and roles for a game.", description="/deletegame [arg]\n\n- arg = Game Name/Campaign\n\nDeletes the appropriate channels and roles for a game.")
@commands.has_role("Mod")
async def deletegame(self, ctx, arg1=None):
    """Delete the role, category, and every channel created for game *arg1*.

    Sends a confirmation message and waits up to 60 seconds for the
    invoker to react with ✅ (delete) or ❌ (abort).
    """
    if arg1 is None:
        await ctx.send("Missing arguments!")
        return

    msg = await ctx.send("Are you sure you want to delete " + str(arg1) + "?")
    await msg.add_reaction("✅")
    await msg.add_reaction("❌")

    def check(reaction, user):
        # Bug fix: only accept a reaction from the invoker on the
        # confirmation message itself; the original accepted any
        # reaction the author added anywhere in the server.
        return user == ctx.author and reaction.message.id == msg.id

    try:
        reaction = await self.client.wait_for('reaction_add', timeout=60.0, check=check)
    except asyncio.TimeoutError:
        await msg.delete()
        await ctx.send("Timed out!")
        return

    if str(reaction[0]) == '✅':
        channel = discord.utils.get(ctx.guild.channels, name=str(arg1))
        role = discord.utils.get(ctx.guild.roles, name=str(arg1))
        await role.delete()
        category = self.client.get_channel(channel.id)
        # Bug fix: use a distinct loop variable so we do not shadow
        # `channel` while deleting the category's children.
        for child in category.channels:
            await child.delete()
        await category.delete()
        await msg.delete()
        await ctx.send("Successfully deleted!")
    elif str(reaction[0]) == '❌':
        await msg.delete()
        await ctx.send("Deletion Aborted!")
    else:
        await ctx.send("That isn't right...")
"@commands.command(aliases=['Removeplayer','RemovePlayer','initdel','Initdel','InitDel'],brief='Removes a player from the initiative.', description=\"/removeplayer [arg]\\n\\n- arg = The index or", "The index or name of the player you'd like to remove from initiative.\\n\\nRemoves", "= GameManager() # Official Format: # Test (category channel) # t-session-planning (text channel)", "= [arg for arg in args] await ctx.send(\"Initiative saved!\") else: game = ctx.channel.category_id", "await ctx.send(\"Successfully added player!\") else: await ctx.send(\"No number for name! >:T\") @commands.command(aliases=['Removeplayer','RemovePlayer','initdel','Initdel','InitDel'],brief='Removes a", "overwrites=overwrites) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member:", "# Test (category channel) # t-session-planning (text channel) # t-notes (text-channel) # t-stars-and-wishes", "order of initiative\\n\\nAllows you to set the current initiative for a game that", "await category.create_text_channel(str(arg2) + \" voice chat\", overwrites=overwrites) await category.create_voice_channel(str(arg2).upper() + \" Sessions\", overwrites=overwrites)", "discord.utils.get(ctx.guild.channels, name=str(arg1)) role = discord.utils.get(ctx.guild.roles, name=str(arg1)) await role.delete() category = self.client.get_channel(channel.id) for channel", "Game Name/Campaign\\n\\nDeletes the appropriate channels and roles for a game.\") @commands.has_role(\"Mod\") async def", "+ \" dice rolls\", overwrites=overwrites) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, 
attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role:", "await channel.delete() await category.delete() await msg.delete() await ctx.send(\"Successfully deleted!\") elif(str(reaction[0]) == '❌'): #More", "await ctx.send(\"Successfully removed player!\") else: del self.initiatives[game][self.initiatives[game].index(str(arg))] await ctx.send(\"Successfully removed player!\") def setup(client):", "+ str(arg1) + \"?\") await msg.add_reaction(\"✅\") await msg.add_reaction(\"❌\") def check(reaction, user): return user", "# Makes a game (category, channel, role, etc) in the server @commands.command(aliases=['Creategame','CreateGame','cg','Cg','cG','CG','gamecreate','Gamecreate','GameCreate','gc','Gc','gC','GC'],brief=\"Makes the", "(text channel) # t-notes (text-channel) # t-stars-and-wishes (text channel) # t-pc-basics (text channel)", "else: await ctx.send(\"That isn't right...\") except asyncio.TimeoutError: await msg.delete() await ctx.send(\"Timed out!\") else:", "t-notes (text-channel) # t-stars-and-wishes (text channel) # t-pc-basics (text channel) # t-pc-sheets (text", "Game Master\\n\\nMakes the necessary channels and roles for a game.\") @commands.has_role(\"Mod\") async def", "__init__(self, client): self.client = client self.initiatives = {} self.gamemanager = GameManager() # Official", "arg2 = Game 
Name Abbreviation\\n- @member = Game Master\\n\\nMakes the necessary channels and", "await self.client.wait_for('reaction_add', timeout=60.0, check=check) if(str(reaction[0]) == '✅'): # Stuff channel = discord.utils.get(ctx.guild.channels, name=str(arg1))", "import commands import random import asyncio class GameManager(): def __init__(self): self.setup() def setup(self):", "gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } category = await guild.create_category_channel(str(arg1)) await category.create_text_channel(str(arg2) + \"", "await ctx.send(\"No number for name >:T\") else: if(not name.isdecimal()): self.initiatives[game].append(name) await ctx.send(\"Successfully added", "from the initiative.\") async def removeplayer(self, ctx, arg): game = ctx.channel.category_id if(str(arg).isdecimal()): del", "await category.delete() await msg.delete() await ctx.send(\"Successfully deleted!\") elif(str(reaction[0]) == '❌'): #More Stuff await", "pos = discord.utils.get(ctx.guild.roles, name=\"⊱ ───── {⭒|PERSONAL|⭒} ───── ⊰\").position +2 member = discord.utils.get(ctx.guild.roles, name=\"Member\")", "= await guild.create_category_channel(str(arg1)) await category.create_text_channel(str(arg2) + \" session planning\", overwrites=overwrites) await 
category.create_text_channel(str(arg2) +", "+ \" pc sheets\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc visuals\", overwrites=overwrites) await", "await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief=\"Deletes the appropriate channels and roles for a game.\", description=\"/deletegame", "# t-stars-and-wishes (text channel) # t-pc-basics (text channel) # t-pc-sheets (text channel) #", "initiative\\n\\nAllows you to set the current initiative for a game that can be", "initiative.\") async def removeplayer(self, ctx, arg): game = ctx.channel.category_id if(str(arg).isdecimal()): del self.initiatives[game][int(arg)-1] await", "house rules\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc basics\", overwrites=overwrites) await category.create_text_channel(str(arg2) +", "for a game.\", description=\"/deletegame [arg]\\n\\n- arg = Game Name/Campaign\\n\\nDeletes the appropriate channels and", "you want to delete \" + str(arg1) + \"?\") await msg.add_reaction(\"✅\") await msg.add_reaction(\"❌\")", "and arg2 != None and gm != None): # Stuff guild = ctx.guild", "ctx.author try: reaction = await self.client.wait_for('reaction_add', timeout=60.0, check=check) if(str(reaction[0]) == '✅'): # Stuff", "voice chat\", overwrites=overwrites) await category.create_voice_channel(str(arg2).upper() + \" Sessions\", overwrites=overwrites) await progress_msg.delete() await ctx.send(\"Done!\")", "the appropriate channels and roles for a game.\") @commands.has_role(\"Mod\") async def deletegame(self, ctx,", "adding to the initiative\\n- idx = Where in the list the player will", "else: await ctx.send(\"No number for name! 
>:T\") @commands.command(aliases=['Removeplayer','RemovePlayer','initdel','Initdel','InitDel'],brief='Removes a player from the initiative.',", "a game that can be used as a reminder.\") async def initiative(self, ctx,", "name=\"Member\") role = await guild.create_role(name=str(arg1), mentionable=True) await role.edit(position=pos) await gm.add_roles(role) overwrites = {", "administrator=None, attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=None,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } category = await guild.create_category_channel(str(arg1)) await category.create_text_channel(str(arg2)", "ctx.send(\"Successfully added player!\") else: 
await ctx.send(\"No number for name! >:T\") @commands.command(aliases=['Removeplayer','RemovePlayer','initdel','Initdel','InitDel'],brief='Removes a player", "Stuff channel = discord.utils.get(ctx.guild.channels, name=str(arg1)) role = discord.utils.get(ctx.guild.roles, name=str(arg1)) await role.delete() category =", "number for name >:T\") else: if(not name.isdecimal()): self.initiatives[game].append(name) await ctx.send(\"Successfully added player!\") else:", "name of the player you are adding to the initiative\\n- idx = Where", "[args]\\n\\n- args = Names separated by spaces to indicate order of initiative\\n\\nAllows you", "attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=None,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None)", "to remove from initiative.\\n\\nRemoves a player from the initiative.\") async def removeplayer(self, ctx,", "administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } await category.create_text_channel(str(arg2) + \" voice chat\", overwrites=overwrites) await category.create_voice_channel(str(arg2).upper() +", "can be used as a reminder.\") async def initiative(self, ctx, *args): if(len(args) !=", "import random import asyncio class GameManager(): def __init__(self): self.setup() def setup(self): print(\"GameManager: Loaded\")", "be used as a reminder.\") async def initiative(self, ctx, *args): if(len(args) != 0):", "game = ctx.channel.category_id 
msg = \"```Initiative:\\n\" counter = 1 for arg in self.initiatives[game]:", "role.delete() category = self.client.get_channel(channel.id) for channel in category.channels: await channel.delete() await category.delete() await", "self.initiatives = {} self.gamemanager = GameManager() # Official Format: # Test (category channel)", "msg += \"```\" # print(self.initiatives[game]) await ctx.send(msg) @commands.command(aliases=['Addplayer','AddPlayer','initadd','Initadd','InitAdd'],brief='Adds a player to the initiative.',", "(text channel) # t-music (text channel) # t-dice-rolls (text channel) # t-voice-chat (text", "def initiative(self, ctx, *args): if(len(args) != 0): if(str(args).isdecimal()): await ctx.send(\"You can't have just", "= 1 for arg in self.initiatives[game]: msg += \"{}) {}\\n\".format(counter, arg) counter+=1 msg", "arg): game = ctx.channel.category_id if(str(arg).isdecimal()): del self.initiatives[game][int(arg)-1] await ctx.send(\"Successfully removed player!\") else: del", "await category.create_voice_channel(str(arg2).upper() + \" Sessions\", overwrites=overwrites) await progress_msg.delete() await ctx.send(\"Done!\") else: await ctx.send(\"Missing", "the initiative\\n- idx = Where in the list the player will go (optional).\\n\\nAdds", "async def deletegame(self, ctx, arg1=None): if(arg1 != None): # Stuff msg = await", "GameManagerCog(commands.Cog): def __init__(self, client): self.client = client self.initiatives = {} self.gamemanager = GameManager()", "def addplayer(self, ctx, name:str, idx=None): game = ctx.channel.category_id if(idx != None): if(not name.isdecimal()):", "initiative(self, ctx, *args): if(len(args) != 0): if(str(args).isdecimal()): await ctx.send(\"You can't have just a", "overwrites=overwrites) await category.create_voice_channel(str(arg2).upper() + \" Sessions\", overwrites=overwrites) await progress_msg.delete() await ctx.send(\"Done!\") else: await", "commands import random import asyncio class GameManager(): def 
class GameManager:
    """Minimal manager object; announces itself on construction."""

    def __init__(self):
        self.setup()

    def setup(self):
        """Report that the manager has finished loading."""
        print("GameManager: Loaded")
dice rolls\", overwrites=overwrites) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False,", "+= \"```\" # print(self.initiatives[game]) await ctx.send(msg) @commands.command(aliases=['Addplayer','AddPlayer','initadd','Initadd','InitAdd'],brief='Adds a player to the initiative.', description='/addplayer", "added player!\") else: await ctx.send(\"No number for name >:T\") else: if(not name.isdecimal()): self.initiatives[game].append(name)", "deletegame(self, ctx, arg1=None): if(arg1 != None): # Stuff msg = await ctx.send(\"Are you", "+ \" pc basics\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc sheets\", overwrites=overwrites) await", "discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, 
attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=None,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } category", "\"{}) {}\\n\".format(counter, arg) counter+=1 msg += \"```\" # print(self.initiatives[game]) await ctx.send(msg) @commands.command(aliases=['Addplayer','AddPlayer','initadd','Initadd','InitAdd'],brief='Adds a", "await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Initiative','init','Init','i','I','initiate','Initiate'],brief=\"Allows you to set the current initiative for a game", "delete \" + str(arg1) + \"?\") await msg.add_reaction(\"✅\") await msg.add_reaction(\"❌\") def check(reaction, user):", "addplayer(self, 
ctx, name:str, idx=None): game = ctx.channel.category_id if(idx != None): if(not name.isdecimal()): self.initiatives[game].insert(int(idx)-1,", "overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc basics\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc", "self.client = client self.initiatives = {} self.gamemanager = GameManager() # Official Format: #", "else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Initiative','init','Init','i','I','initiate','Initiate'],brief=\"Allows you to set the current initiative for a", "[idx]\\n\\n- name = The name of the player you are adding to the", "name = The name of the player you are adding to the initiative\\n-", "channel) # t-notes (text-channel) # t-stars-and-wishes (text channel) # t-pc-basics (text channel) #", "await ctx.send(\"Timed out!\") else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Initiative','init','Init','i','I','initiate','Initiate'],brief=\"Allows you to set the current", "the necessary channels and roles for a game.\") @commands.has_role(\"Mod\") async def creategame(self, ctx,", "# t-voice-chat (text channel) # T Sessions (voice channel) # Makes a game", "msg = await ctx.send(\"Are you sure you want to delete \" + str(arg1)", "overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc sheets\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc", "can be used as a reminder.\", description=\"/initiative [args]\\n\\n- args = Names separated by", "name=str(arg1)) role = discord.utils.get(ctx.guild.roles, name=str(arg1)) await role.delete() category = self.client.get_channel(channel.id) for channel in", "print(self.initiatives[game]) await ctx.send(msg) @commands.command(aliases=['Addplayer','AddPlayer','initadd','Initadd','InitAdd'],brief='Adds a player to the initiative.', description='/addplayer [name] [idx]\\n\\n- name", "\" star and wishes\", overwrites=overwrites) 
await category.create_text_channel(str(arg2) + \" house rules\", overwrites=overwrites) await", "the player you'd like to remove from initiative.\\n\\nRemoves a player from the initiative.\")", "await ctx.send(\"Deletion Aborted!\") else: await ctx.send(\"That isn't right...\") except asyncio.TimeoutError: await msg.delete() await", "arg2=None, gm: discord.Member = None): if(arg1 != None and arg2 != None and", "None): # Stuff guild = ctx.guild progress_msg = await ctx.send(\"Making...\") pos = discord.utils.get(ctx.guild.roles,", "ctx.send(\"Timed out!\") else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Initiative','init','Init','i','I','initiate','Initiate'],brief=\"Allows you to set the current initiative", "channel) # t-pc-basics (text channel) # t-pc-sheets (text channel) # t-pc-visuals (text channel)", "pc visuals\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" music\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \"", "guild.create_category_channel(str(arg1)) await category.create_text_channel(str(arg2) + \" session planning\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" notes\",", "a player from the initiative.', description=\"/removeplayer [arg]\\n\\n- arg = The index or name", "# t-pc-basics (text channel) # t-pc-sheets (text channel) # t-pc-visuals (text channel) #", "guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, 
attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, 
attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=None,view_guild_insights=None), gm:", "} category = await guild.create_category_channel(str(arg1)) await category.create_text_channel(str(arg2) + \" session planning\", overwrites=overwrites) await", "channel) # t-music (text channel) # t-dice-rolls (text channel) # t-voice-chat (text channel)", "arguments!\") @commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief=\"Deletes the appropriate channels and roles for a game.\", description=\"/deletegame [arg]\\n\\n- arg", "category.create_text_channel(str(arg2) + \" notes\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" star and wishes\", overwrites=overwrites)", "ctx.send(\"Missing arguments!\") @commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief=\"Deletes the appropriate channels and roles for a game.\", description=\"/deletegame [arg]\\n\\n-", "await msg.add_reaction(\"✅\") await msg.add_reaction(\"❌\") def check(reaction, user): return user == ctx.author try: reaction", "current initiative for a game that can be used as a reminder.\", description=\"/initiative", "discord.PermissionOverwrite(add_reactions=True, administrator=None, 
attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=True,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } await category.create_text_channel(str(arg2) + \" voice", "self.setup() def setup(self): print(\"GameManager: Loaded\") class GameManagerCog(commands.Cog): def __init__(self, client): self.client = client", "administrator=False, 
attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, 
attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=True,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None,", "[name] [idx]\\n\\n- name = The name of the player you are adding to", "you are adding to the initiative\\n- idx = Where in the list the", "channels and roles for a game.\") @commands.has_role(\"Mod\") async def deletegame(self, ctx, arg1=None): if(arg1", "administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, 
attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=None,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } category =", "can't have just a number for a name, sorry :(\") else: game =", "(text channel) # t-pc-basics (text channel) # t-pc-sheets (text channel) # t-pc-visuals (text", "arg = The index or name of the player you'd like to remove", "await msg.delete() await ctx.send(\"Deletion Aborted!\") else: await ctx.send(\"That isn't right...\") except asyncio.TimeoutError: await", "await ctx.send(\"No number for name! 
>:T\") @commands.command(aliases=['Removeplayer','RemovePlayer','initdel','Initdel','InitDel'],brief='Removes a player from the initiative.', description=\"/removeplayer", "Aborted!\") else: await ctx.send(\"That isn't right...\") except asyncio.TimeoutError: await msg.delete() await ctx.send(\"Timed out!\")", "game = ctx.channel.category_id self.initiatives[game] = [arg for arg in args] await ctx.send(\"Initiative saved!\")", "in category.channels: await channel.delete() await category.delete() await msg.delete() await ctx.send(\"Successfully deleted!\") elif(str(reaction[0]) ==", "ctx.send(\"Are you sure you want to delete \" + str(arg1) + \"?\") await", "async def addplayer(self, ctx, name:str, idx=None): game = ctx.channel.category_id if(idx != None): if(not", "to delete \" + str(arg1) + \"?\") await msg.add_reaction(\"✅\") await msg.add_reaction(\"❌\") def check(reaction,", "used as a reminder.\") async def initiative(self, ctx, *args): if(len(args) != 0): if(str(args).isdecimal()):", "a game that can be used as a reminder.\", description=\"/initiative [args]\\n\\n- args =", "{} self.gamemanager = GameManager() # Official Format: # Test (category channel) # t-session-planning", "# Stuff msg = await ctx.send(\"Are you sure you want to delete \"", "import asyncio class GameManager(): def __init__(self): self.setup() def setup(self): print(\"GameManager: Loaded\") class GameManagerCog(commands.Cog):", "initiative\\n- idx = Where in the list the player will go (optional).\\n\\nAdds a", "!= None and gm != None): # Stuff guild = ctx.guild progress_msg =", ">:T\") @commands.command(aliases=['Removeplayer','RemovePlayer','initdel','Initdel','InitDel'],brief='Removes a player from the initiative.', description=\"/removeplayer [arg]\\n\\n- arg = The index", "from initiative.\\n\\nRemoves a player from the initiative.\") async def removeplayer(self, ctx, arg): game", "print(\"GameManager: Loaded\") class GameManagerCog(commands.Cog): def __init__(self, client): 
self.client = client self.initiatives = {}", "\"?\") await msg.add_reaction(\"✅\") await msg.add_reaction(\"❌\") def check(reaction, user): return user == ctx.author try:", "ctx.send(\"You can't have just a number for a name, sorry :(\") else: game", "# Stuff channel = discord.utils.get(ctx.guild.channels, name=str(arg1)) role = discord.utils.get(ctx.guild.roles, name=str(arg1)) await role.delete() category", "attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, 
attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=True,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None)", "sure you want to delete \" + str(arg1) + \"?\") await msg.add_reaction(\"✅\") await", "for name! 
>:T\") @commands.command(aliases=['Removeplayer','RemovePlayer','initdel','Initdel','InitDel'],brief='Removes a player from the initiative.', description=\"/removeplayer [arg]\\n\\n- arg =", "@commands.command(aliases=['Addplayer','AddPlayer','initadd','Initadd','InitAdd'],brief='Adds a player to the initiative.', description='/addplayer [name] [idx]\\n\\n- name = The name", "if(str(args).isdecimal()): await ctx.send(\"You can't have just a number for a name, sorry :(\")", "} await category.create_text_channel(str(arg2) + \" voice chat\", overwrites=overwrites) await category.create_voice_channel(str(arg2).upper() + \" Sessions\",", "ctx.send(\"Successfully added player!\") else: await ctx.send(\"No number for name >:T\") else: if(not name.isdecimal()):", "name.isdecimal()): self.initiatives[game].append(name) await ctx.send(\"Successfully added player!\") else: await ctx.send(\"No number for name! >:T\")", "the necessary channels and roles for a game.\", description=\"/creategame [arg1] [arg2] @member\\n\\n- arg1", "by spaces to indicate order of initiative\\n\\nAllows you to set the current initiative", "[arg]\\n\\n- arg = The index or name of the player you'd like to", "# t-session-planning (text channel) # t-notes (text-channel) # t-stars-and-wishes (text channel) # t-pc-basics", "channel) # t-session-planning (text channel) # t-notes (text-channel) # t-stars-and-wishes (text channel) #", "await ctx.send(\"Successfully deleted!\") elif(str(reaction[0]) == '❌'): #More Stuff await msg.delete() await ctx.send(\"Deletion Aborted!\")", ">:T\") else: if(not name.isdecimal()): self.initiatives[game].append(name) await ctx.send(\"Successfully added player!\") else: await ctx.send(\"No number", "+ \" session planning\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" notes\", overwrites=overwrites) await category.create_text_channel(str(arg2)", "+ \" voice chat\", overwrites=overwrites) await category.create_voice_channel(str(arg2).upper() + \" 
Sessions\", overwrites=overwrites) await progress_msg.delete()", "random import asyncio class GameManager(): def __init__(self): self.setup() def setup(self): print(\"GameManager: Loaded\") class", "try: reaction = await self.client.wait_for('reaction_add', timeout=60.0, check=check) if(str(reaction[0]) == '✅'): # Stuff channel", "*args): if(len(args) != 0): if(str(args).isdecimal()): await ctx.send(\"You can't have just a number for", "{}\\n\".format(counter, arg) counter+=1 msg += \"```\" # print(self.initiatives[game]) await ctx.send(msg) @commands.command(aliases=['Addplayer','AddPlayer','initadd','Initadd','InitAdd'],brief='Adds a player", "ctx, *args): if(len(args) != 0): if(str(args).isdecimal()): await ctx.send(\"You can't have just a number", "= await ctx.send(\"Are you sure you want to delete \" + str(arg1) +", "in the list the player will go (optional).\\n\\nAdds a player to the initiative.')", "to the initiative\\n- idx = Where in the list the player will go", "creategame(self, ctx, arg1=None, arg2=None, gm: discord.Member = None): if(arg1 != None and arg2", "rolls\", overwrites=overwrites) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: 
discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None),", "Test (category channel) # t-session-planning (text channel) # t-notes (text-channel) # t-stars-and-wishes (text", "= ctx.channel.category_id self.initiatives[game] = [arg for arg in args] await ctx.send(\"Initiative saved!\") else:", "initiative.\\n\\nRemoves a player from the initiative.\") async def removeplayer(self, ctx, arg): game =", "channel = discord.utils.get(ctx.guild.channels, name=str(arg1)) role = discord.utils.get(ctx.guild.roles, name=str(arg1)) await role.delete() category = self.client.get_channel(channel.id)", "\"```\" # print(self.initiatives[game]) await ctx.send(msg) @commands.command(aliases=['Addplayer','AddPlayer','initadd','Initadd','InitAdd'],brief='Adds a player to the initiative.', description='/addplayer [name]", "name=\"⊱ ───── {⭒|PERSONAL|⭒} ───── ⊰\").position +2 member = discord.utils.get(ctx.guild.roles, name=\"Member\") role = await", "the current initiative for a game that can be used as a reminder.\",", "Format: # Test (category channel) # t-session-planning (text channel) # t-notes (text-channel) #", "channel) # t-dice-rolls (text channel) # t-voice-chat (text channel) # T Sessions (voice", "= {} self.gamemanager = GameManager() # Official Format: # Test (category channel) #", 
"administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } category = await guild.create_category_channel(str(arg1)) await category.create_text_channel(str(arg2) + \" session planning\",", "chat\", overwrites=overwrites) await category.create_voice_channel(str(arg2).upper() + \" Sessions\", overwrites=overwrites) await progress_msg.delete() await ctx.send(\"Done!\") else:", "and roles for a game.\") @commands.has_role(\"Mod\") async def deletegame(self, ctx, arg1=None): if(arg1 !=", "= ctx.channel.category_id msg = \"```Initiative:\\n\" counter = 1 for arg in self.initiatives[game]: msg", "category.create_text_channel(str(arg2) + \" pc sheets\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc visuals\", overwrites=overwrites)", "game = ctx.channel.category_id if(idx != None): if(not name.isdecimal()): self.initiatives[game].insert(int(idx)-1, name) await ctx.send(\"Successfully added", "+ \" Sessions\", overwrites=overwrites) await progress_msg.delete() await ctx.send(\"Done!\") else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief=\"Deletes", "in self.initiatives[game]: msg += \"{}) {}\\n\".format(counter, arg) counter+=1 msg += \"```\" # print(self.initiatives[game])", 
"discord.PermissionOverwrite(add_reactions=True, administrator=None, attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=None,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } category = await guild.create_category_channel(str(arg1)) await", "ctx.send(\"Making...\") pos = discord.utils.get(ctx.guild.roles, name=\"⊱ ───── {⭒|PERSONAL|⭒} ───── ⊰\").position +2 member = discord.utils.get(ctx.guild.roles,", "star and wishes\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" house rules\", overwrites=overwrites) await category.create_text_channel(str(arg2)", "await ctx.send(\"You can't have just a number for a name, sorry :(\") 
else:", "channels and roles for a game.\", description=\"/deletegame [arg]\\n\\n- arg = Game Name/Campaign\\n\\nDeletes the", "roles for a game.\") @commands.has_role(\"Mod\") async def creategame(self, ctx, arg1=None, arg2=None, gm: discord.Member", "category.create_text_channel(str(arg2) + \" session planning\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" notes\", overwrites=overwrites) await", "+ \"?\") await msg.add_reaction(\"✅\") await msg.add_reaction(\"❌\") def check(reaction, user): return user == ctx.author", "except asyncio.TimeoutError: await msg.delete() await ctx.send(\"Timed out!\") else: await ctx.send(\"Missing arguments!\") @commands.command(aliases=['Initiative','init','Init','i','I','initiate','Initiate'],brief=\"Allows you", "that can be used as a reminder.\") async def initiative(self, ctx, *args): if(len(args)", "class GameManagerCog(commands.Cog): def __init__(self, client): self.client = client self.initiatives = {} self.gamemanager =", "ctx.channel.category_id msg = \"```Initiative:\\n\" counter = 1 for arg in self.initiatives[game]: msg +=", "Where in the list the player will go (optional).\\n\\nAdds a player to the", "await guild.create_category_channel(str(arg1)) await category.create_text_channel(str(arg2) + \" session planning\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \"", "idx = Where in the list the player will go (optional).\\n\\nAdds a player", "the initiative.') async def addplayer(self, ctx, name:str, idx=None): game = ctx.channel.category_id if(idx !=", "__init__(self): self.setup() def setup(self): print(\"GameManager: Loaded\") class GameManagerCog(commands.Cog): def __init__(self, client): self.client =", "remove from initiative.\\n\\nRemoves a player from the initiative.\") async def removeplayer(self, ctx, arg):", "async def removeplayer(self, ctx, arg): game = ctx.channel.category_id if(str(arg).isdecimal()): del self.initiatives[game][int(arg)-1] await 
ctx.send(\"Successfully", "overwrites=overwrites) await category.create_text_channel(str(arg2) + \" music\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" dice rolls\",", "the player you are adding to the initiative\\n- idx = Where in the", "channels and roles for a game.\") @commands.has_role(\"Mod\") async def creategame(self, ctx, arg1=None, arg2=None,", "Loaded\") class GameManagerCog(commands.Cog): def __init__(self, client): self.client = client self.initiatives = {} self.gamemanager", "+ \" music\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" dice rolls\", overwrites=overwrites) overwrites =", "ctx, arg1=None): if(arg1 != None): # Stuff msg = await ctx.send(\"Are you sure", "role: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, 
attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=None,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) }", "= \"```Initiative:\\n\" counter = 1 for arg in self.initiatives[game]: msg += \"{}) {}\\n\".format(counter,", "The name of the player you are adding to the initiative\\n- idx =", "t-session-planning (text channel) # t-notes (text-channel) # t-stars-and-wishes (text channel) # t-pc-basics (text", "0): if(str(args).isdecimal()): await ctx.send(\"You can't have just a number for a name, sorry", "None): if(arg1 != None and arg2 != None and gm != None): #", "or name of the player you'd like to remove from initiative.\\n\\nRemoves a player", 
"return user == ctx.author try: reaction = await self.client.wait_for('reaction_add', timeout=60.0, check=check) if(str(reaction[0]) ==", "channels and roles for a game.\", description=\"/creategame [arg1] [arg2] @member\\n\\n- arg1 = Game", "discord.Member = None): if(arg1 != None and arg2 != None and gm !=", "set the current initiative for a game that can be used as a", "channel) # t-pc-visuals (text channel) # t-music (text channel) # t-dice-rolls (text channel)", "if(str(reaction[0]) == '✅'): # Stuff channel = discord.utils.get(ctx.guild.channels, name=str(arg1)) role = discord.utils.get(ctx.guild.roles, name=str(arg1))", "arg1=None): if(arg1 != None): # Stuff msg = await ctx.send(\"Are you sure you", "sorry :(\") else: game = ctx.channel.category_id self.initiatives[game] = [arg for arg in args]", "from discord.ext import commands import random import asyncio class GameManager(): def __init__(self): self.setup()", "───── ⊰\").position +2 member = discord.utils.get(ctx.guild.roles, name=\"Member\") role = await guild.create_role(name=str(arg1), mentionable=True) await", "Abbreviation\\n- @member = Game Master\\n\\nMakes the necessary channels and roles for a game.\")", "player!\") else: await ctx.send(\"No number for name >:T\") else: if(not name.isdecimal()): self.initiatives[game].append(name) await", "await ctx.send(\"Making...\") pos = discord.utils.get(ctx.guild.roles, name=\"⊱ ───── {⭒|PERSONAL|⭒} ───── ⊰\").position +2 member =", "check=check) if(str(reaction[0]) == '✅'): # Stuff channel = discord.utils.get(ctx.guild.channels, name=str(arg1)) role = discord.utils.get(ctx.guild.roles,", "go (optional).\\n\\nAdds a player to the initiative.') async def addplayer(self, ctx, name:str, idx=None):", "wishes\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" house rules\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \"", "progress_msg.delete() await ctx.send(\"Done!\") else: await ctx.send(\"Missing 
arguments!\") @commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief=\"Deletes the appropriate channels and roles", "to set the current initiative for a game that can be used as", "category.create_text_channel(str(arg2) + \" music\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" dice rolls\", overwrites=overwrites) overwrites", "= The name of the player you are adding to the initiative\\n- idx", "attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=True,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } await 
category.create_text_channel(str(arg2) + \" voice chat\", overwrites=overwrites)", "arg1 = Game Name/Campaign\\n- arg2 = Game Name Abbreviation\\n- @member = Game Master\\n\\nMakes", "description='/addplayer [name] [idx]\\n\\n- name = The name of the player you are adding", "self.initiatives[game].append(name) await ctx.send(\"Successfully added player!\") else: await ctx.send(\"No number for name! >:T\") @commands.command(aliases=['Removeplayer','RemovePlayer','initdel','Initdel','InitDel'],brief='Removes", "if(str(arg).isdecimal()): del self.initiatives[game][int(arg)-1] await ctx.send(\"Successfully removed player!\") else: del self.initiatives[game][self.initiatives[game].index(str(arg))] await ctx.send(\"Successfully removed", "arg) counter+=1 msg += \"```\" # print(self.initiatives[game]) await ctx.send(msg) @commands.command(aliases=['Addplayer','AddPlayer','initadd','Initadd','InitAdd'],brief='Adds a player to", "administrator=None, attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=True,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } await category.create_text_channel(str(arg2) + \" voice chat\",", "are adding to the initiative\\n- idx = Where in the list the player", "msg += \"{}) {}\\n\".format(counter, arg) counter+=1 msg += \"```\" # print(self.initiatives[game]) await ctx.send(msg)", "msg.add_reaction(\"✅\") await msg.add_reaction(\"❌\") def check(reaction, user): return user == ctx.author try: reaction =", "self.client.wait_for('reaction_add', timeout=60.0, check=check) if(str(reaction[0]) == '✅'): # Stuff channel = discord.utils.get(ctx.guild.channels, name=str(arg1)) role", "await ctx.send(msg) @commands.command(aliases=['Addplayer','AddPlayer','initadd','Initadd','InitAdd'],brief='Adds a player to the initiative.', description='/addplayer [name] [idx]\\n\\n- name =", "= await self.client.wait_for('reaction_add', timeout=60.0, check=check) if(str(reaction[0]) == '✅'): # Stuff channel = discord.utils.get(ctx.guild.channels,", "and wishes\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" house rules\", overwrites=overwrites) await category.create_text_channel(str(arg2) +", "!= None and arg2 != None and gm != None): # Stuff guild", "arg = Game Name/Campaign\\n\\nDeletes the appropriate channels and roles for a game.\") @commands.has_role(\"Mod\")", "setup(self): print(\"GameManager: 
Loaded\") class GameManagerCog(commands.Cog): def __init__(self, client): self.client = client self.initiatives =", "# Official Format: # Test (category channel) # t-session-planning (text channel) # t-notes", "t-stars-and-wishes (text channel) # t-pc-basics (text channel) # t-pc-sheets (text channel) # t-pc-visuals", "dice rolls\", overwrites=overwrites) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None, administrator=None,", "removeplayer(self, ctx, arg): game = ctx.channel.category_id if(str(arg).isdecimal()): del self.initiatives[game][int(arg)-1] await ctx.send(\"Successfully removed player!\")", "\" music\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" dice rolls\", overwrites=overwrites) overwrites = {", "isn't right...\") except asyncio.TimeoutError: await msg.delete() await ctx.send(\"Timed out!\") else: await ctx.send(\"Missing arguments!\")", "basics\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc sheets\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \"", "the list the player will go (optional).\\n\\nAdds a player to the initiative.') async", "!= 0): 
if(str(args).isdecimal()): await ctx.send(\"You can't have just a number for a name,", "used as a reminder.\", description=\"/initiative [args]\\n\\n- args = Names separated by spaces to", "a game.\", description=\"/creategame [arg1] [arg2] @member\\n\\n- arg1 = Game Name/Campaign\\n- arg2 = Game", "ctx.channel.category_id if(str(arg).isdecimal()): del self.initiatives[game][int(arg)-1] await ctx.send(\"Successfully removed player!\") else: del self.initiatives[game][self.initiatives[game].index(str(arg))] await ctx.send(\"Successfully", "ctx.send(\"That isn't right...\") except asyncio.TimeoutError: await msg.delete() await ctx.send(\"Timed out!\") else: await ctx.send(\"Missing", "self.gamemanager = GameManager() # Official Format: # Test (category channel) # t-session-planning (text", "overwrites=overwrites) await category.create_text_channel(str(arg2) + \" star and wishes\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \"", "separated by spaces to indicate order of initiative\\n\\nAllows you to set the current", "added player!\") else: await ctx.send(\"No number for name! 
>:T\") @commands.command(aliases=['Removeplayer','RemovePlayer','initdel','Initdel','InitDel'],brief='Removes a player from", "a game.\") @commands.has_role(\"Mod\") async def deletegame(self, ctx, arg1=None): if(arg1 != None): # Stuff", "t-music (text channel) # t-dice-rolls (text channel) # t-voice-chat (text channel) # T", "= discord.utils.get(ctx.guild.channels, name=str(arg1)) role = discord.utils.get(ctx.guild.roles, name=str(arg1)) await role.delete() category = self.client.get_channel(channel.id) for", "description=\"/deletegame [arg]\\n\\n- arg = Game Name/Campaign\\n\\nDeletes the appropriate channels and roles for a", "(category, channel, role, etc) in the server @commands.command(aliases=['Creategame','CreateGame','cg','Cg','cG','CG','gamecreate','Gamecreate','GameCreate','gc','Gc','gC','GC'],brief=\"Makes the necessary channels and roles", "planning\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" notes\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" star", "game (category, channel, role, etc) in the server @commands.command(aliases=['Creategame','CreateGame','cg','Cg','cG','CG','gamecreate','Gamecreate','GameCreate','gc','Gc','gC','GC'],brief=\"Makes the necessary channels and", "\" dice rolls\", overwrites=overwrites) overwrites = { guild.default_role: discord.PermissionOverwrite(add_reactions=False, administrator=False, 
attach_files=False,ban_members=False,change_nickname=False,connect=False,create_instant_invite=False,deafen_members=False,embed_links=False,external_emojis=False,kick_members=False,manage_channels=False,manage_emojis=False,manage_guild=False,manage_messages=False,manage_nicknames=False,manage_permissions=False,manage_roles=False,manage_webhooks=False,mention_everyone=False,move_members=False,mute_members=False,priority_speaker=False,read_message_history=False,read_messages=False,request_to_speak=False,send_messages=False,send_tts_messages=False,speak=False,stream=False,use_external_emojis=False,use_slash_commands=False,use_voice_activation=False,view_audit_log=False,view_channel=False,view_guild_insights=False), role: discord.PermissionOverwrite(add_reactions=None,", "1 for arg in self.initiatives[game]: msg += \"{}) {}\\n\".format(counter, arg) counter+=1 msg +=", "await msg.add_reaction(\"❌\") def check(reaction, user): return user == ctx.author try: reaction = await", "player you'd like to remove from initiative.\\n\\nRemoves a player from the initiative.\") async", "ctx.send(\"Successfully removed player!\") else: del self.initiatives[game][self.initiatives[game].index(str(arg))] await ctx.send(\"Successfully removed player!\") def setup(client): client.add_cog(GameManager(client))", "player from the initiative.', description=\"/removeplayer [arg]\\n\\n- arg = The index or name of", "= Where in the list the player will go (optional).\\n\\nAdds a player to", "@commands.has_role(\"Mod\") async def deletegame(self, ctx, arg1=None): if(arg1 != None): # Stuff msg =", "await category.create_text_channel(str(arg2) + \" pc sheets\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc visuals\",", "= client self.initiatives = {} self.gamemanager = GameManager() # Official Format: # Test", "number for name! 
>:T\") @commands.command(aliases=['Removeplayer','RemovePlayer','initdel','Initdel','InitDel'],brief='Removes a player from the initiative.', description=\"/removeplayer [arg]\\n\\n- arg", "visuals\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" music\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" dice", "ctx, arg): game = ctx.channel.category_id if(str(arg).isdecimal()): del self.initiatives[game][int(arg)-1] await ctx.send(\"Successfully removed player!\") else:", "have just a number for a name, sorry :(\") else: game = ctx.channel.category_id", "else: game = ctx.channel.category_id self.initiatives[game] = [arg for arg in args] await ctx.send(\"Initiative", "name of the player you'd like to remove from initiative.\\n\\nRemoves a player from", "+ \" pc visuals\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" music\", overwrites=overwrites) await category.create_text_channel(str(arg2)", "game that can be used as a reminder.\", description=\"/initiative [args]\\n\\n- args = Names", "gm: discord.Member = None): if(arg1 != None and arg2 != None and gm", "ctx.send(\"Deletion Aborted!\") else: await ctx.send(\"That isn't right...\") except asyncio.TimeoutError: await msg.delete() await ctx.send(\"Timed", "a player to the initiative.', description='/addplayer [name] [idx]\\n\\n- name = The name of", "game.\", description=\"/creategame [arg1] [arg2] @member\\n\\n- arg1 = Game Name/Campaign\\n- arg2 = Game Name", "ctx.send(\"Missing arguments!\") @commands.command(aliases=['Initiative','init','Init','i','I','initiate','Initiate'],brief=\"Allows you to set the current initiative for a game that", "str(arg1) + \"?\") await msg.add_reaction(\"✅\") await msg.add_reaction(\"❌\") def check(reaction, user): return user ==", "to indicate order of initiative\\n\\nAllows you to set the current initiative for a", "administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=True,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } await category.create_text_channel(str(arg2)", "discord.utils.get(ctx.guild.roles, name=\"Member\") role = await guild.create_role(name=str(arg1), mentionable=True) await role.edit(position=pos) await gm.add_roles(role) overwrites =", "@commands.command(aliases=['Initiative','init','Init','i','I','initiate','Initiate'],brief=\"Allows you to set the current initiative for a game that can be", "game.\", description=\"/deletegame [arg]\\n\\n- arg = Game Name/Campaign\\n\\nDeletes the appropriate channels and roles for", "msg.delete() await ctx.send(\"Deletion Aborted!\") else: await ctx.send(\"That isn't right...\") except asyncio.TimeoutError: await msg.delete()", "arg2 != None and gm != None): # Stuff guild = ctx.guild progress_msg", "number for a name, sorry :(\") else: game = ctx.channel.category_id self.initiatives[game] = [arg", "discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None), member: discord.PermissionOverwrite(add_reactions=True, administrator=None, attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=True,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } await", "game that can be used as a reminder.\") async def initiative(self, ctx, *args):", "to the initiative.', description='/addplayer [name] [idx]\\n\\n- name = The name of the player", "t-dice-rolls (text channel) # t-voice-chat (text channel) # T Sessions (voice channel) #", "category.delete() await msg.delete() await ctx.send(\"Successfully deleted!\") elif(str(reaction[0]) == '❌'): #More Stuff await msg.delete()", "timeout=60.0, check=check) if(str(reaction[0]) == '✅'): # Stuff channel = discord.utils.get(ctx.guild.channels, name=str(arg1)) role =", "(text channel) # t-voice-chat (text channel) # T Sessions (voice channel) # Makes", "args] await ctx.send(\"Initiative saved!\") else: game = ctx.channel.category_id msg = \"```Initiative:\\n\" counter =", "you to set the current initiative for a game that can be used", "category = await guild.create_category_channel(str(arg1)) await category.create_text_channel(str(arg2) + \" session planning\", overwrites=overwrites) await category.create_text_channel(str(arg2)", "await category.create_text_channel(str(arg2) + \" pc visuals\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" music\", overwrites=overwrites)", "ctx.guild progress_msg = await ctx.send(\"Making...\") pos = discord.utils.get(ctx.guild.roles, 
name=\"⊱ ───── {⭒|PERSONAL|⭒} ───── ⊰\").position", "self.client.get_channel(channel.id) for channel in category.channels: await channel.delete() await category.delete() await msg.delete() await ctx.send(\"Successfully", "guild = ctx.guild progress_msg = await ctx.send(\"Making...\") pos = discord.utils.get(ctx.guild.roles, name=\"⊱ ───── {⭒|PERSONAL|⭒}", "t-pc-basics (text channel) # t-pc-sheets (text channel) # t-pc-visuals (text channel) # t-music", "+= \"{}) {}\\n\".format(counter, arg) counter+=1 msg += \"```\" # print(self.initiatives[game]) await ctx.send(msg) @commands.command(aliases=['Addplayer','AddPlayer','initadd','Initadd','InitAdd'],brief='Adds", "'✅'): # Stuff channel = discord.utils.get(ctx.guild.channels, name=str(arg1)) role = discord.utils.get(ctx.guild.roles, name=str(arg1)) await role.delete()", "───── {⭒|PERSONAL|⭒} ───── ⊰\").position +2 member = discord.utils.get(ctx.guild.roles, name=\"Member\") role = await guild.create_role(name=str(arg1),", "[arg]\\n\\n- arg = Game Name/Campaign\\n\\nDeletes the appropriate channels and roles for a game.\")", "user): return user == ctx.author try: reaction = await self.client.wait_for('reaction_add', timeout=60.0, check=check) if(str(reaction[0])", "ctx.send(msg) @commands.command(aliases=['Addplayer','AddPlayer','initadd','Initadd','InitAdd'],brief='Adds a player to the initiative.', description='/addplayer [name] [idx]\\n\\n- name = The", "name! 
>:T\") @commands.command(aliases=['Removeplayer','RemovePlayer','initdel','Initdel','InitDel'],brief='Removes a player from the initiative.', description=\"/removeplayer [arg]\\n\\n- arg = The", "member = discord.utils.get(ctx.guild.roles, name=\"Member\") role = await guild.create_role(name=str(arg1), mentionable=True) await role.edit(position=pos) await gm.add_roles(role)", "+ \" notes\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" star and wishes\", overwrites=overwrites) await", "await ctx.send(\"That isn't right...\") except asyncio.TimeoutError: await msg.delete() await ctx.send(\"Timed out!\") else: await", "for a name, sorry :(\") else: game = ctx.channel.category_id self.initiatives[game] = [arg for", "@commands.command(aliases=['Deletegame','DeleteGame','dg','Dg','dG','DG','gamedelete','Gamedelete','GameDelete','gd','Gd','gD','GD'],brief=\"Deletes the appropriate channels and roles for a game.\", description=\"/deletegame [arg]\\n\\n- arg =", "overwrites=overwrites) await category.create_text_channel(str(arg2) + \" house rules\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" pc", "that can be used as a reminder.\", description=\"/initiative [args]\\n\\n- args = Names separated", "else: if(not name.isdecimal()): self.initiatives[game].append(name) await ctx.send(\"Successfully added player!\") else: await ctx.send(\"No number for", "= Names separated by spaces to indicate order of initiative\\n\\nAllows you to set", "discord.ext import commands import random import asyncio class GameManager(): def __init__(self): self.setup() def", "role = await guild.create_role(name=str(arg1), mentionable=True) await role.edit(position=pos) await gm.add_roles(role) overwrites = { guild.default_role:", "GameManager() # Official Format: # Test (category channel) # t-session-planning (text channel) #", "category.create_voice_channel(str(arg2).upper() + \" Sessions\", overwrites=overwrites) await progress_msg.delete() 
await ctx.send(\"Done!\") else: await ctx.send(\"Missing arguments!\")", "Game Name/Campaign\\n- arg2 = Game Name Abbreviation\\n- @member = Game Master\\n\\nMakes the necessary", "description=\"/initiative [args]\\n\\n- args = Names separated by spaces to indicate order of initiative\\n\\nAllows", "necessary channels and roles for a game.\") @commands.has_role(\"Mod\") async def creategame(self, ctx, arg1=None,", "= ctx.channel.category_id if(str(arg).isdecimal()): del self.initiatives[game][int(arg)-1] await ctx.send(\"Successfully removed player!\") else: del self.initiatives[game][self.initiatives[game].index(str(arg))] await", "will go (optional).\\n\\nAdds a player to the initiative.') async def addplayer(self, ctx, name:str,", "player will go (optional).\\n\\nAdds a player to the initiative.') async def addplayer(self, ctx,", "description=\"/creategame [arg1] [arg2] @member\\n\\n- arg1 = Game Name/Campaign\\n- arg2 = Game Name Abbreviation\\n-", "attach_files=True,ban_members=None,change_nickname=None,connect=True,create_instant_invite=None,deafen_members=None,embed_links=True,external_emojis=True,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=None,move_members=None,mute_members=None,priority_speaker=None,read_message_history=True,read_messages=True,request_to_speak=None,send_messages=True,send_tts_messages=None,speak=None,stream=None,use_external_emojis=None,use_slash_commands=None,use_voice_activation=None,view_audit_log=None,view_channel=None,view_guild_insights=None), gm: discord.PermissionOverwrite(add_reactions=None, administrator=None, 
attach_files=None,ban_members=None,change_nickname=None,connect=None,create_instant_invite=None,deafen_members=None,embed_links=None,external_emojis=None,kick_members=None,manage_channels=None,manage_emojis=None,manage_guild=None,manage_messages=None,manage_nicknames=None,manage_permissions=None,manage_roles=None,manage_webhooks=None,mention_everyone=True,move_members=None,mute_members=True,priority_speaker=True,read_message_history=None,read_messages=None,request_to_speak=None,send_messages=None,send_tts_messages=None,speak=True,stream=None,use_external_emojis=None,use_slash_commands=True,use_voice_activation=True,view_audit_log=None,view_channel=True,view_guild_insights=None) } category = await guild.create_category_channel(str(arg1)) await category.create_text_channel(str(arg2) +", "+ \" star and wishes\", overwrites=overwrites) await category.create_text_channel(str(arg2) + \" house rules\", overwrites=overwrites)", "if(arg1 != None and arg2 != None and gm != None): # Stuff" ]
[ "derived from recording_id.\"\"\" self.root = root self.recording_id = recording_id starts = [] ends", "frequency_banks(self, blockSize=600): if self.signal is None: self.read_recording() fbanks = numpy.zeros((0, 1, 26)) start", "1, 1) pyplot.plot(60 * numpy.arange(minutes), volubility) pyplot.title('Volubility for {0}'.format(speaker)) pyplot.xlabel('Time (min)') pyplot.ylabel('Vocalized Seconds", "- ends[:-1] fig = pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(starts[1:], intervals) pyplot.title('Vocalization Intervals for", "individual segments and save those segments in a directory structure according to the", "numpy import os import math from xml.etree import ElementTree from scipy.io import wavfile", "the segments and speakers in the corresponding WAV file. It also contains a", "end if end < len(self.signal) else len(self.signal) block = self.signal[start:end] fbank = logfbank(block,", "winstep=0.025) fbanks = numpy.concatenate((fbanks, numpy.reshape(fbank, (len(fbank), 1, 26)))) start = end return fbanks", "category, i): \"\"\"Read an individual segment WAV file. 
Returns the sample rate and", "2, durations) pyplot.title('Vocalization Durations for {0}'.format('ALL' if speaker is None else speaker)) pyplot.xlabel('Time", "and a histogram of segment durations, optionally filtered for a speaker.\"\"\" if speaker", "the indices, start times, and end times of all segments labeled with the", "by Speaker') pyplot.xticks(numpy.arange(len(speakers)) + 0.5, speakers) pyplot.xlim(0, len(speakers)) pyplot.xlabel('Speaker') pyplot.ylabel('Count') return fig def", "if formatted.startswith('PT') and formatted.endswith('S'): return float(formatted[2:-1]) def plot_speaker_counts(recording): \"\"\"Plot the number of segments", "self.signal = None self.duration = None def read_recording(self): \"\"\"Read the WAV file corresponding", "while start < len(self.signal): end = start + blockSize * self.samplerate end =", "for m in range(minutes): start_minute = 60 * m end_minute = 60 *", "speaker)) pyplot.xlabel('Time (s)') pyplot.ylabel('Duration (s)') pyplot.subplot(2, 1, 2) pyplot.hist(durations, bins=numpy.logspace(0, 4, 100)) pyplot.xscale('log')", "= [] ends = [] speakers = [] tree = ElementTree.parse(os.path.join(self.root, '{0}.its'.format(self.recording_id))) root", "{0}'.format(speaker)) pyplot.xlabel('Time (min)') pyplot.ylabel('Vocalized Seconds / Minute') pyplot.subplot(2, 1, 2) pyplot.hist(volubility, bins=50) pyplot.yscale('log')", "100)) pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Duration (s)') pyplot.ylabel('Count') return fig def plot_intervals(recording, speaker): \"\"\"Plot a", "speakers = [] tree = ElementTree.parse(os.path.join(self.root, '{0}.its'.format(self.recording_id))) root = tree.getroot() for segment in", "pyplot.ylabel('Count') return fig def plot_volubility(recording, speaker): \"\"\"Plot the volubility ratio (proportion of time", "- starts fig = pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(starts + durations / 2,", "pyplot.yscale('log') pyplot.xlabel('Duration (s)') pyplot.ylabel('Count') return 
fig def plot_intervals(recording, speaker): \"\"\"Plot a time series", "numpy.array(ends) self.speakers = speakers self.samplerate = None self.signal = None self.duration = None", "else: i, starts, ends = recording.filter_speaker(speaker) durations = ends - starts fig =", "blocks to aggregate segments.\"\"\" minutes = math.ceil((recording.ends[-1] - recording.starts[0]) / 60) volubility =", "Speaker') pyplot.xticks(numpy.arange(len(speakers)) + 0.5, speakers) pyplot.xlim(0, len(speakers)) pyplot.xlabel('Speaker') pyplot.ylabel('Count') return fig def plot_durations(recording,", "formatted.endswith('S'): return float(formatted[2:-1]) def plot_speaker_counts(recording): \"\"\"Plot the number of segments in the recording", "start + blockSize * self.samplerate end = end if end < len(self.signal) else", "ends = recording.filter_speaker(speaker) for m in range(minutes): start_minute = 60 * m end_minute", "file in the directory root with a filename derived from recording_id.\"\"\" self.root =", "as WAV files for acoustic analysis.\"\"\" def __init__(self, root, recording_id): \"\"\"Construct a new", "labeled with the speaker.\"\"\" index = numpy.array(self.speakers) == speaker return numpy.where(index)[0], self.starts[index], self.ends[index]", "time series and a histogram of segment durations, optionally filtered for a speaker.\"\"\"", "== speaker return numpy.where(index)[0], self.starts[index], self.ends[index] def parse_time(formatted): \"\"\"Returns the time in seconds", "{0}'.format('ALL' if speaker is None else speaker)) pyplot.xlabel('Time (s)') pyplot.ylabel('Duration (s)') pyplot.subplot(2, 1,", "root self.recording_id = recording_id starts = [] ends = [] speakers = []", "speaker) if not os.path.exists(speaker_dir): os.makedirs(speaker_dir) if self.signal is None: self.read_recording() for start, end,", "def plot_intervals(recording, speaker): \"\"\"Plot a time series and histogram of segment intervals labeled", "os.path.join(recording_dir, speaker) if not 
os.path.exists(speaker_dir): os.makedirs(speaker_dir) if self.signal is None: self.read_recording() for start,", "pyplot.subplot(2, 1, 1) pyplot.plot(starts[1:], intervals) pyplot.title('Vocalization Intervals for {0}'.format(speaker)) pyplot.xlabel('Time (s)') pyplot.ylabel('Interval (s)')", "pyplot.ylabel('Duration (s)') pyplot.subplot(2, 1, 2) pyplot.hist(durations, bins=numpy.logspace(0, 4, 100)) pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Duration (s)')", "require Pacific timezone, lookup lena format spec if formatted.startswith('PT') and formatted.endswith('S'): return float(formatted[2:-1])", "ends.append(parse_time(segment.attrib['endTime'])) self.starts = numpy.array(starts) self.ends = numpy.array(ends) self.speakers = speakers self.samplerate = None", "for {0}'.format(speaker)) pyplot.xlabel('Time (s)') pyplot.ylabel('Interval (s)') pyplot.subplot(2, 1, 2) pyplot.hist(intervals, bins=numpy.logspace(0, 4, 50))", "Seconds / Minute') pyplot.subplot(2, 1, 2) pyplot.hist(volubility, bins=50) pyplot.yscale('log') pyplot.xlabel('Volubility') pyplot.ylabel('Count') return fig", "self.duration = len(self.signal) / self.samplerate def frequency_banks(self, blockSize=600): if self.signal is None: self.read_recording()", "fig = pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(starts + durations / 2, durations) pyplot.title('Vocalization", "slow.\"\"\" filepath = os.path.join(self.root, '{0}.wav'.format(self.recording_id)) self.samplerate, self.signal = wavfile.read(filepath) self.duration = len(self.signal) /", "a directory structure according to the identified speaker.\"\"\" recording_dir = os.path.join(self.root, self.recording_id) if", "None self.signal = None self.duration = None def read_recording(self): \"\"\"Read the WAV file", "This should not require Pacific timezone, lookup lena format spec if formatted.startswith('PT') and", "pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Interval (s)') pyplot.ylabel('Count') return fig def 
plot_volubility(recording, speaker): \"\"\"Plot the volubility", "recording into individual segments and save those segments in a directory structure according", "volubility /= 60 fig = pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(60 * numpy.arange(minutes), volubility)", "starts = recording.starts ends = recording.ends else: i, starts, ends = recording.filter_speaker(speaker) durations", "= os.path.join(self.root, self.recording_id, category, '{0}.wav'.format(i)) return wavfile.read(filename) def filter_speaker(self, speaker): \"\"\"Return the indices,", "(s)') pyplot.ylabel('Duration (s)') pyplot.subplot(2, 1, 2) pyplot.hist(durations, bins=numpy.logspace(0, 4, 100)) pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Duration", "deferred because it can be slow.\"\"\" filepath = os.path.join(self.root, '{0}.wav'.format(self.recording_id)) self.samplerate, self.signal =", "= logfbank(block, self.samplerate, winlen=0.05, winstep=0.025) fbanks = numpy.concatenate((fbanks, numpy.reshape(fbank, (len(fbank), 1, 26)))) start", "file. Returns the sample rate and signal.\"\"\" filename = os.path.join(self.root, self.recording_id, category, '{0}.wav'.format(i))", "pyplot.xlabel('Duration (s)') pyplot.ylabel('Count') return fig def plot_intervals(recording, speaker): \"\"\"Plot a time series and", "numpy.unique(recording.speakers, return_counts=True) fig = pyplot.figure() pyplot.bar(numpy.arange(len(speakers)) + 0.1, counts) pyplot.title('Number of Vocalizations by", "\"\"\"Read an individual segment WAV file. 
Returns the sample rate and signal.\"\"\" filename", "pyplot.ylabel('Count') return fig def plot_durations(recording, speaker=None): \"\"\"Plot a time series and a histogram", "parse_time(formatted): \"\"\"Returns the time in seconds indicated by the formatted string.\"\"\" # TODO:", "(s)') pyplot.ylabel('Interval (s)') pyplot.subplot(2, 1, 2) pyplot.hist(intervals, bins=numpy.logspace(0, 4, 50)) pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Interval", "this recording into individual segments and save those segments in a directory structure", "if self.signal is None: self.read_recording() for start, end, speaker, i in zip(self.starts, self.ends,", "(proportion of time that speaker is speaking) as a time series and histogram.", "in root.iter('Segment'): speakers.append(segment.attrib['spkr']) starts.append(parse_time(segment.attrib['startTime'])) ends.append(parse_time(segment.attrib['endTime'])) self.starts = numpy.array(starts) self.ends = numpy.array(ends) self.speakers =", "segment = self.signal[int(start * self.samplerate):int(end * self.samplerate)] wavfile.write(os.path.join(recording_dir, speaker, '{0}.wav'.format(i)), self.samplerate, segment) def", "self.starts[index], self.ends[index] def parse_time(formatted): \"\"\"Returns the time in seconds indicated by the formatted", "directory structure according to the identified speaker.\"\"\" recording_dir = os.path.join(self.root, self.recording_id) if not", "indices, start times, and end times of all segments labeled with the speaker.\"\"\"", "segments and save those segments in a directory structure according to the identified", "range(minutes): start_minute = 60 * m end_minute = 60 * m + 60", "import pyplot from features import logfbank class Recording: \"\"\"Recording reads an ITS file", "format spec if formatted.startswith('PT') and formatted.endswith('S'): return float(formatted[2:-1]) def plot_speaker_counts(recording): \"\"\"Plot the number", "fbanks = numpy.zeros((0, 1, 26)) start = 0 while 
start < len(self.signal): end", "new Recording by reading the ITS file in the directory root with a", "Pacific timezone, lookup lena format spec if formatted.startswith('PT') and formatted.endswith('S'): return float(formatted[2:-1]) def", "plot_volubility(recording, speaker): \"\"\"Plot the volubility ratio (proportion of time that speaker is speaking)", "= recording.starts ends = recording.ends else: i, starts, ends = recording.filter_speaker(speaker) durations =", "pyplot.ylabel('Interval (s)') pyplot.subplot(2, 1, 2) pyplot.hist(intervals, bins=numpy.logspace(0, 4, 50)) pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Interval (s)')", "'{0}.its'.format(self.recording_id))) root = tree.getroot() for segment in root.iter('Segment'): speakers.append(segment.attrib['spkr']) starts.append(parse_time(segment.attrib['startTime'])) ends.append(parse_time(segment.attrib['endTime'])) self.starts =", "2) pyplot.hist(durations, bins=numpy.logspace(0, 4, 100)) pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Duration (s)') pyplot.ylabel('Count') return fig def", "a time series and histogram of segment intervals labeled as speaker.\"\"\" i, starts,", "It also contains a method to split and save out individual segments as", "filepath = os.path.join(self.root, '{0}.wav'.format(self.recording_id)) self.samplerate, self.signal = wavfile.read(filepath) self.duration = len(self.signal) / self.samplerate", "series and histogram of segment intervals labeled as speaker.\"\"\" i, starts, ends =", "in the corresponding WAV file. It also contains a method to split and", "is speaking) as a time series and histogram. This analysis uses one minute", "histogram. This analysis uses one minute blocks to aggregate segments.\"\"\" minutes = math.ceil((recording.ends[-1]", "segment) def read_segment(self, category, i): \"\"\"Read an individual segment WAV file. 
Returns the", "zip(starts, ends): volubility[m] += max(min(end_minute, end) - max(start_minute, start), 0) volubility /= 60", "= [] speakers = [] tree = ElementTree.parse(os.path.join(self.root, '{0}.its'.format(self.recording_id))) root = tree.getroot() for", "start_minute = 60 * m end_minute = 60 * m + 60 for", "def read_recording(self): \"\"\"Read the WAV file corresponding to this Recording. This is deferred", "segments in a directory structure according to the identified speaker.\"\"\" recording_dir = os.path.join(self.root,", "numpy.reshape(fbank, (len(fbank), 1, 26)))) start = end return fbanks def split_segments(self): \"\"\"Split the", "i, starts, ends = recording.filter_speaker(speaker) for m in range(minutes): start_minute = 60 *", "times, and end times of all segments labeled with the speaker.\"\"\" index =", "file corresponding to this Recording. This is deferred because it can be slow.\"\"\"", "segment durations, optionally filtered for a speaker.\"\"\" if speaker is None: starts =", "This is deferred because it can be slow.\"\"\" filepath = os.path.join(self.root, '{0}.wav'.format(self.recording_id)) self.samplerate,", "of all segments labeled with the speaker.\"\"\" index = numpy.array(self.speakers) == speaker return", "a method to split and save out individual segments as WAV files for", "ElementTree from scipy.io import wavfile from matplotlib import pyplot from features import logfbank", "(min)') pyplot.ylabel('Vocalized Seconds / Minute') pyplot.subplot(2, 1, 2) pyplot.hist(volubility, bins=50) pyplot.yscale('log') pyplot.xlabel('Volubility') pyplot.ylabel('Count')", "4, 100)) pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Duration (s)') pyplot.ylabel('Count') return fig def plot_intervals(recording, speaker): \"\"\"Plot", "= numpy.concatenate((fbanks, numpy.reshape(fbank, (len(fbank), 1, 26)))) start = end return fbanks def split_segments(self):", "scipy.io import wavfile from matplotlib import pyplot from features import logfbank 
class Recording:", "should not require Pacific timezone, lookup lena format spec if formatted.startswith('PT') and formatted.endswith('S'):", "= pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(starts + durations / 2, durations) pyplot.title('Vocalization Durations", "from recording_id.\"\"\" self.root = root self.recording_id = recording_id starts = [] ends =", "self.samplerate def frequency_banks(self, blockSize=600): if self.signal is None: self.read_recording() fbanks = numpy.zeros((0, 1,", "def parse_time(formatted): \"\"\"Returns the time in seconds indicated by the formatted string.\"\"\" #", "self.read_recording() fbanks = numpy.zeros((0, 1, 26)) start = 0 while start < len(self.signal):", "data about the segments and speakers in the corresponding WAV file. It also", "= [] tree = ElementTree.parse(os.path.join(self.root, '{0}.its'.format(self.recording_id))) root = tree.getroot() for segment in root.iter('Segment'):", "into individual segments and save those segments in a directory structure according to", "None else speaker)) pyplot.xlabel('Time (s)') pyplot.ylabel('Duration (s)') pyplot.subplot(2, 1, 2) pyplot.hist(durations, bins=numpy.logspace(0, 4,", "speakers, counts = numpy.unique(recording.speakers, return_counts=True) fig = pyplot.figure() pyplot.bar(numpy.arange(len(speakers)) + 0.1, counts) pyplot.title('Number", "pyplot.xlabel('Interval (s)') pyplot.ylabel('Count') return fig def plot_volubility(recording, speaker): \"\"\"Plot the volubility ratio (proportion", "os.path.join(self.root, self.recording_id, category, '{0}.wav'.format(i)) return wavfile.read(filename) def filter_speaker(self, speaker): \"\"\"Return the indices, start", "Returns the sample rate and signal.\"\"\" filename = os.path.join(self.root, self.recording_id, category, '{0}.wav'.format(i)) return", "starts = [] ends = [] speakers = [] tree = ElementTree.parse(os.path.join(self.root, '{0}.its'.format(self.recording_id)))", "also contains a method to split and save out 
individual segments as WAV", "time that speaker is speaking) as a time series and histogram. This analysis", "os.path.exists(speaker_dir): os.makedirs(speaker_dir) if self.signal is None: self.read_recording() for start, end, speaker, i in", "end = start + blockSize * self.samplerate end = end if end <", "/ self.samplerate def frequency_banks(self, blockSize=600): if self.signal is None: self.read_recording() fbanks = numpy.zeros((0,", "histogram of segment durations, optionally filtered for a speaker.\"\"\" if speaker is None:", "recording_dir = os.path.join(self.root, self.recording_id) if not os.path.exists(recording_dir): os.makedirs(recording_dir) for speaker in set(self.speakers): speaker_dir", "return fbanks def split_segments(self): \"\"\"Split the WAV file for this recording into individual", "ends[:-1] fig = pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(starts[1:], intervals) pyplot.title('Vocalization Intervals for {0}'.format(speaker))", "pyplot.title('Number of Vocalizations by Speaker') pyplot.xticks(numpy.arange(len(speakers)) + 0.5, speakers) pyplot.xlim(0, len(speakers)) pyplot.xlabel('Speaker') pyplot.ylabel('Count')", "pyplot.hist(durations, bins=numpy.logspace(0, 4, 100)) pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Duration (s)') pyplot.ylabel('Count') return fig def plot_intervals(recording,", "for start, end in zip(starts, ends): volubility[m] += max(min(end_minute, end) - max(start_minute, start),", "\"\"\"Plot the number of segments in the recording for each speaker.\"\"\" speakers, counts", "+ 0.1, counts) pyplot.title('Number of Vocalizations by Speaker') pyplot.xticks(numpy.arange(len(speakers)) + 0.5, speakers) pyplot.xlim(0,", "and save out individual segments as WAV files for acoustic analysis.\"\"\" def __init__(self,", "WAV file. 
It also contains a method to split and save out individual", "ITS file in the directory root with a filename derived from recording_id.\"\"\" self.root", "uses one minute blocks to aggregate segments.\"\"\" minutes = math.ceil((recording.ends[-1] - recording.starts[0]) /", "a speaker.\"\"\" if speaker is None: starts = recording.starts ends = recording.ends else:", "(s)') pyplot.subplot(2, 1, 2) pyplot.hist(intervals, bins=numpy.logspace(0, 4, 50)) pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Interval (s)') pyplot.ylabel('Count')", "< len(self.signal): end = start + blockSize * self.samplerate end = end if", "wavfile.read(filename) def filter_speaker(self, speaker): \"\"\"Return the indices, start times, and end times of", "Intervals for {0}'.format(speaker)) pyplot.xlabel('Time (s)') pyplot.ylabel('Interval (s)') pyplot.subplot(2, 1, 2) pyplot.hist(intervals, bins=numpy.logspace(0, 4,", "= speakers self.samplerate = None self.signal = None self.duration = None def read_recording(self):", "return_counts=True) fig = pyplot.figure() pyplot.bar(numpy.arange(len(speakers)) + 0.1, counts) pyplot.title('Number of Vocalizations by Speaker')", "formatted.startswith('PT') and formatted.endswith('S'): return float(formatted[2:-1]) def plot_speaker_counts(recording): \"\"\"Plot the number of segments in", "start < len(self.signal): end = start + blockSize * self.samplerate end = end", "intervals) pyplot.title('Vocalization Intervals for {0}'.format(speaker)) pyplot.xlabel('Time (s)') pyplot.ylabel('Interval (s)') pyplot.subplot(2, 1, 2) pyplot.hist(intervals,", "volubility[m] += max(min(end_minute, end) - max(start_minute, start), 0) volubility /= 60 fig =", "= numpy.array(self.speakers) == speaker return numpy.where(index)[0], self.starts[index], self.ends[index] def parse_time(formatted): \"\"\"Returns the time", "in the directory root with a filename derived from recording_id.\"\"\" self.root = root", "start times, and end times of all segments labeled with the 
speaker.\"\"\" index", "WAV file. Returns the sample rate and signal.\"\"\" filename = os.path.join(self.root, self.recording_id, category,", "0) volubility /= 60 fig = pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(60 * numpy.arange(minutes),", "in seconds indicated by the formatted string.\"\"\" # TODO: This should not require", "matplotlib import pyplot from features import logfbank class Recording: \"\"\"Recording reads an ITS", "= starts[1:] - ends[:-1] fig = pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(starts[1:], intervals) pyplot.title('Vocalization", "Durations for {0}'.format('ALL' if speaker is None else speaker)) pyplot.xlabel('Time (s)') pyplot.ylabel('Duration (s)')", "if end < len(self.signal) else len(self.signal) block = self.signal[start:end] fbank = logfbank(block, self.samplerate,", "Recording: \"\"\"Recording reads an ITS file exported from LENA and parses out data", "with a filename derived from recording_id.\"\"\" self.root = root self.recording_id = recording_id starts", "self.ends = numpy.array(ends) self.speakers = speakers self.samplerate = None self.signal = None self.duration", "logfbank(block, self.samplerate, winlen=0.05, winstep=0.025) fbanks = numpy.concatenate((fbanks, numpy.reshape(fbank, (len(fbank), 1, 26)))) start =", "root with a filename derived from recording_id.\"\"\" self.root = root self.recording_id = recording_id", "= None self.duration = None def read_recording(self): \"\"\"Read the WAV file corresponding to", "a new Recording by reading the ITS file in the directory root with", "filtered for a speaker.\"\"\" if speaker is None: starts = recording.starts ends =", "(s)') pyplot.ylabel('Count') return fig def plot_volubility(recording, speaker): \"\"\"Plot the volubility ratio (proportion of", "WAV files for acoustic analysis.\"\"\" def __init__(self, root, recording_id): \"\"\"Construct a new Recording", "out individual segments as WAV files for acoustic analysis.\"\"\" def __init__(self, root, 
recording_id):", "read_segment(self, category, i): \"\"\"Read an individual segment WAV file. Returns the sample rate", "return wavfile.read(filename) def filter_speaker(self, speaker): \"\"\"Return the indices, start times, and end times", "if self.signal is None: self.read_recording() fbanks = numpy.zeros((0, 1, 26)) start = 0", "= recording.filter_speaker(speaker) intervals = starts[1:] - ends[:-1] fig = pyplot.figure() pyplot.subplot(2, 1, 1)", "plot_speaker_counts(recording): \"\"\"Plot the number of segments in the recording for each speaker.\"\"\" speakers,", "pyplot.subplot(2, 1, 1) pyplot.plot(60 * numpy.arange(minutes), volubility) pyplot.title('Volubility for {0}'.format(speaker)) pyplot.xlabel('Time (min)') pyplot.ylabel('Vocalized", "0.5, speakers) pyplot.xlim(0, len(speakers)) pyplot.xlabel('Speaker') pyplot.ylabel('Count') return fig def plot_durations(recording, speaker=None): \"\"\"Plot a", "recording_id.\"\"\" self.root = root self.recording_id = recording_id starts = [] ends = []", "tree = ElementTree.parse(os.path.join(self.root, '{0}.its'.format(self.recording_id))) root = tree.getroot() for segment in root.iter('Segment'): speakers.append(segment.attrib['spkr']) starts.append(parse_time(segment.attrib['startTime']))", "self.signal is None: self.read_recording() for start, end, speaker, i in zip(self.starts, self.ends, self.speakers,", "rate and signal.\"\"\" filename = os.path.join(self.root, self.recording_id, category, '{0}.wav'.format(i)) return wavfile.read(filename) def filter_speaker(self,", "speaker is None: starts = recording.starts ends = recording.ends else: i, starts, ends", "speaker, '{0}.wav'.format(i)), self.samplerate, segment) def read_segment(self, category, i): \"\"\"Read an individual segment WAV", "recording.filter_speaker(speaker) intervals = starts[1:] - ends[:-1] fig = pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(starts[1:],", "self.samplerate, segment) def read_segment(self, category, i): \"\"\"Read an individual 
segment WAV file. Returns", "= recording_id starts = [] ends = [] speakers = [] tree =", "for start, end, speaker, i in zip(self.starts, self.ends, self.speakers, range(len(self.starts))): segment = self.signal[int(start", "def plot_durations(recording, speaker=None): \"\"\"Plot a time series and a histogram of segment durations,", "can be slow.\"\"\" filepath = os.path.join(self.root, '{0}.wav'.format(self.recording_id)) self.samplerate, self.signal = wavfile.read(filepath) self.duration =", "4, 50)) pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Interval (s)') pyplot.ylabel('Count') return fig def plot_volubility(recording, speaker): \"\"\"Plot", "1) pyplot.plot(starts + durations / 2, durations) pyplot.title('Vocalization Durations for {0}'.format('ALL' if speaker", "speaker return numpy.where(index)[0], self.starts[index], self.ends[index] def parse_time(formatted): \"\"\"Returns the time in seconds indicated", "from features import logfbank class Recording: \"\"\"Recording reads an ITS file exported from", "in range(minutes): start_minute = 60 * m end_minute = 60 * m +", "speaker is speaking) as a time series and histogram. 
This analysis uses one", "60 * m end_minute = 60 * m + 60 for start, end", "pyplot.xlabel('Speaker') pyplot.ylabel('Count') return fig def plot_durations(recording, speaker=None): \"\"\"Plot a time series and a", "is None: starts = recording.starts ends = recording.ends else: i, starts, ends =", "{0}'.format(speaker)) pyplot.xlabel('Time (s)') pyplot.ylabel('Interval (s)') pyplot.subplot(2, 1, 2) pyplot.hist(intervals, bins=numpy.logspace(0, 4, 50)) pyplot.xscale('log')", "(s)') pyplot.ylabel('Count') return fig def plot_intervals(recording, speaker): \"\"\"Plot a time series and histogram", "else len(self.signal) block = self.signal[start:end] fbank = logfbank(block, self.samplerate, winlen=0.05, winstep=0.025) fbanks =", "by the formatted string.\"\"\" # TODO: This should not require Pacific timezone, lookup", "= end if end < len(self.signal) else len(self.signal) block = self.signal[start:end] fbank =", "pyplot.xlabel('Time (s)') pyplot.ylabel('Duration (s)') pyplot.subplot(2, 1, 2) pyplot.hist(durations, bins=numpy.logspace(0, 4, 100)) pyplot.xscale('log') pyplot.yscale('log')", "'{0}.wav'.format(i)), self.samplerate, segment) def read_segment(self, category, i): \"\"\"Read an individual segment WAV file.", "recording.filter_speaker(speaker) durations = ends - starts fig = pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(starts", "\"\"\"Read the WAV file corresponding to this Recording. 
This is deferred because it", "os.path.join(self.root, self.recording_id) if not os.path.exists(recording_dir): os.makedirs(recording_dir) for speaker in set(self.speakers): speaker_dir = os.path.join(recording_dir,", "in zip(starts, ends): volubility[m] += max(min(end_minute, end) - max(start_minute, start), 0) volubility /=", "= root self.recording_id = recording_id starts = [] ends = [] speakers =", "segments in the recording for each speaker.\"\"\" speakers, counts = numpy.unique(recording.speakers, return_counts=True) fig", "len(self.signal) block = self.signal[start:end] fbank = logfbank(block, self.samplerate, winlen=0.05, winstep=0.025) fbanks = numpy.concatenate((fbanks,", "ITS file exported from LENA and parses out data about the segments and", "of segment intervals labeled as speaker.\"\"\" i, starts, ends = recording.filter_speaker(speaker) intervals =", "lookup lena format spec if formatted.startswith('PT') and formatted.endswith('S'): return float(formatted[2:-1]) def plot_speaker_counts(recording): \"\"\"Plot", "TODO: This should not require Pacific timezone, lookup lena format spec if formatted.startswith('PT')", "pyplot.yscale('log') pyplot.xlabel('Interval (s)') pyplot.ylabel('Count') return fig def plot_volubility(recording, speaker): \"\"\"Plot the volubility ratio", "if speaker is None: starts = recording.starts ends = recording.ends else: i, starts,", "self.ends[index] def parse_time(formatted): \"\"\"Returns the time in seconds indicated by the formatted string.\"\"\"", "one minute blocks to aggregate segments.\"\"\" minutes = math.ceil((recording.ends[-1] - recording.starts[0]) / 60)", "to this Recording. This is deferred because it can be slow.\"\"\" filepath =", "= None self.signal = None self.duration = None def read_recording(self): \"\"\"Read the WAV", "Recording. 
This is deferred because it can be slow.\"\"\" filepath = os.path.join(self.root, '{0}.wav'.format(self.recording_id))", "0.1, counts) pyplot.title('Number of Vocalizations by Speaker') pyplot.xticks(numpy.arange(len(speakers)) + 0.5, speakers) pyplot.xlim(0, len(speakers))", "import logfbank class Recording: \"\"\"Recording reads an ITS file exported from LENA and", "recording for each speaker.\"\"\" speakers, counts = numpy.unique(recording.speakers, return_counts=True) fig = pyplot.figure() pyplot.bar(numpy.arange(len(speakers))", "os.path.join(self.root, '{0}.wav'.format(self.recording_id)) self.samplerate, self.signal = wavfile.read(filepath) self.duration = len(self.signal) / self.samplerate def frequency_banks(self,", "float(formatted[2:-1]) def plot_speaker_counts(recording): \"\"\"Plot the number of segments in the recording for each", "\"\"\"Plot a time series and a histogram of segment durations, optionally filtered for", "in the recording for each speaker.\"\"\" speakers, counts = numpy.unique(recording.speakers, return_counts=True) fig =", "os import math from xml.etree import ElementTree from scipy.io import wavfile from matplotlib", "fig def plot_volubility(recording, speaker): \"\"\"Plot the volubility ratio (proportion of time that speaker", "(len(fbank), 1, 26)))) start = end return fbanks def split_segments(self): \"\"\"Split the WAV", "end times of all segments labeled with the speaker.\"\"\" index = numpy.array(self.speakers) ==", "speaker=None): \"\"\"Plot a time series and a histogram of segment durations, optionally filtered", "ends = recording.filter_speaker(speaker) durations = ends - starts fig = pyplot.figure() pyplot.subplot(2, 1,", "None: self.read_recording() fbanks = numpy.zeros((0, 1, 26)) start = 0 while start <", "len(self.signal) else len(self.signal) block = self.signal[start:end] fbank = logfbank(block, self.samplerate, winlen=0.05, winstep=0.025) fbanks", "60 for start, end in zip(starts, ends): volubility[m] += 
def plot_volubility(recording, speaker):
    """Plot the volubility ratio (proportion of time that speaker is speaking) as a time series
    and histogram. This analysis uses one minute blocks to aggregate segments.

    Args:
        recording: a Recording whose ITS segments have been parsed.
        speaker: the speaker label whose segments are aggregated.

    Returns:
        The matplotlib figure containing both plots.
    """
    minutes = math.ceil((recording.ends[-1] - recording.starts[0]) / 60)
    volubility = numpy.zeros(minutes)
    _, starts, ends = recording.filter_speaker(speaker)
    for m in range(minutes):
        start_minute = 60 * m
        end_minute = start_minute + 60
        # Vectorized overlap of every segment with this one-minute block;
        # clipping at zero drops segments entirely outside the block. This
        # replaces a per-segment Python inner loop that made the computation
        # O(minutes * segments) at interpreter speed.
        overlap = numpy.minimum(ends, end_minute) - numpy.maximum(starts, start_minute)
        volubility[m] = numpy.clip(overlap, 0, None).sum()
    volubility /= 60
    fig = pyplot.figure()
    pyplot.subplot(2, 1, 1)
    pyplot.plot(60 * numpy.arange(minutes), volubility)
    pyplot.title('Volubility for {0}'.format(speaker))
    pyplot.xlabel('Time (min)')
    pyplot.ylabel('Vocalized Seconds / Minute')
    pyplot.subplot(2, 1, 2)
    pyplot.hist(volubility, bins=50)
    pyplot.yscale('log')
    pyplot.xlabel('Volubility')
    pyplot.ylabel('Count')
    return fig
Returns the sample", "and formatted.endswith('S'): return float(formatted[2:-1]) def plot_speaker_counts(recording): \"\"\"Plot the number of segments in the", "pyplot.xlabel('Time (s)') pyplot.ylabel('Interval (s)') pyplot.subplot(2, 1, 2) pyplot.hist(intervals, bins=numpy.logspace(0, 4, 50)) pyplot.xscale('log') pyplot.yscale('log')", "parses out data about the segments and speakers in the corresponding WAV file.", "[] tree = ElementTree.parse(os.path.join(self.root, '{0}.its'.format(self.recording_id))) root = tree.getroot() for segment in root.iter('Segment'): speakers.append(segment.attrib['spkr'])", "self.read_recording() for start, end, speaker, i in zip(self.starts, self.ends, self.speakers, range(len(self.starts))): segment =", "starts fig = pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(starts + durations / 2, durations)", "fig def plot_durations(recording, speaker=None): \"\"\"Plot a time series and a histogram of segment", "speaker.\"\"\" if speaker is None: starts = recording.starts ends = recording.ends else: i,", "segments as WAV files for acoustic analysis.\"\"\" def __init__(self, root, recording_id): \"\"\"Construct a", "self.duration = None def read_recording(self): \"\"\"Read the WAV file corresponding to this Recording.", "bins=numpy.logspace(0, 4, 100)) pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Duration (s)') pyplot.ylabel('Count') return fig def plot_intervals(recording, speaker):", "speaker.\"\"\" recording_dir = os.path.join(self.root, self.recording_id) if not os.path.exists(recording_dir): os.makedirs(recording_dir) for speaker in set(self.speakers):", "root, recording_id): \"\"\"Construct a new Recording by reading the ITS file in the", "recording_id): \"\"\"Construct a new Recording by reading the ITS file in the directory", "= math.ceil((recording.ends[-1] - recording.starts[0]) / 60) volubility = numpy.zeros(minutes) i, starts, ends =", "= numpy.unique(recording.speakers, return_counts=True) fig = pyplot.figure() 
def plot_speaker_counts(recording):
    """Plot a bar chart of how many labeled segments each speaker has in the recording."""
    labels, tallies = numpy.unique(recording.speakers, return_counts=True)
    positions = numpy.arange(len(labels))
    fig = pyplot.figure()
    pyplot.bar(positions + 0.1, tallies)
    pyplot.title('Number of Vocalizations by Speaker')
    pyplot.xticks(positions + 0.5, labels)
    pyplot.xlim(0, len(labels))
    pyplot.xlabel('Speaker')
    pyplot.ylabel('Count')
    return fig
def plot_intervals(recording, speaker):
    """Plot the gaps between consecutive segments labeled as speaker.

    Top panel: gap length plotted at the start of each following segment.
    Bottom panel: log-log histogram of gap lengths. Returns the figure.
    """
    _, starts, ends = recording.filter_speaker(speaker)
    gaps = starts[1:] - ends[:-1]
    fig = pyplot.figure()
    pyplot.subplot(2, 1, 1)
    pyplot.plot(starts[1:], gaps)
    pyplot.title('Vocalization Intervals for {0}'.format(speaker))
    pyplot.xlabel('Time (s)')
    pyplot.ylabel('Interval (s)')
    pyplot.subplot(2, 1, 2)
    pyplot.hist(gaps, bins=numpy.logspace(0, 4, 50))
    pyplot.xscale('log')
    pyplot.yscale('log')
    pyplot.xlabel('Interval (s)')
    pyplot.ylabel('Count')
    return fig
def parse_time(formatted):
    """Return the time in seconds represented by an ITS duration string.

    ITS files encode times as ISO-8601-style durations such as 'PT123.45S'.

    Raises:
        ValueError: if the string is not in the expected 'PT<seconds>S' form.
    """
    # TODO: This should not require Pacific timezone, lookup lena format spec
    if formatted.startswith('PT') and formatted.endswith('S'):
        return float(formatted[2:-1])
    # Previously an unrecognized format silently returned None, which later
    # surfaced as confusing errors inside numpy arrays; fail fast instead.
    raise ValueError('Unrecognized time format: {0}'.format(formatted))
This is deferred because it can be slow.\"\"\" filepath", "self.signal[start:end] fbank = logfbank(block, self.samplerate, winlen=0.05, winstep=0.025) fbanks = numpy.concatenate((fbanks, numpy.reshape(fbank, (len(fbank), 1,", "series and a histogram of segment durations, optionally filtered for a speaker.\"\"\" if", "\"\"\"Plot a time series and histogram of segment intervals labeled as speaker.\"\"\" i,", "file for this recording into individual segments and save those segments in a", "block = self.signal[start:end] fbank = logfbank(block, self.samplerate, winlen=0.05, winstep=0.025) fbanks = numpy.concatenate((fbanks, numpy.reshape(fbank,", "for a speaker.\"\"\" if speaker is None: starts = recording.starts ends = recording.ends", "in a directory structure according to the identified speaker.\"\"\" recording_dir = os.path.join(self.root, self.recording_id)", "speaker, i in zip(self.starts, self.ends, self.speakers, range(len(self.starts))): segment = self.signal[int(start * self.samplerate):int(end *", "each speaker.\"\"\" speakers, counts = numpy.unique(recording.speakers, return_counts=True) fig = pyplot.figure() pyplot.bar(numpy.arange(len(speakers)) + 0.1,", "recording_id starts = [] ends = [] speakers = [] tree = ElementTree.parse(os.path.join(self.root,", "plot_durations(recording, speaker=None): \"\"\"Plot a time series and a histogram of segment durations, optionally", "self.samplerate = None self.signal = None self.duration = None def read_recording(self): \"\"\"Read the", "1) pyplot.plot(60 * numpy.arange(minutes), volubility) pyplot.title('Volubility for {0}'.format(speaker)) pyplot.xlabel('Time (min)') pyplot.ylabel('Vocalized Seconds /", "range(len(self.starts))): segment = self.signal[int(start * self.samplerate):int(end * self.samplerate)] wavfile.write(os.path.join(recording_dir, speaker, '{0}.wav'.format(i)), self.samplerate, segment)", "60) volubility = numpy.zeros(minutes) i, starts, ends = recording.filter_speaker(speaker) for m in 
range(minutes):", "index = numpy.array(self.speakers) == speaker return numpy.where(index)[0], self.starts[index], self.ends[index] def parse_time(formatted): \"\"\"Returns the", "aggregate segments.\"\"\" minutes = math.ceil((recording.ends[-1] - recording.starts[0]) / 60) volubility = numpy.zeros(minutes) i,", "for speaker in set(self.speakers): speaker_dir = os.path.join(recording_dir, speaker) if not os.path.exists(speaker_dir): os.makedirs(speaker_dir) if", "of time that speaker is speaking) as a time series and histogram. This", "This analysis uses one minute blocks to aggregate segments.\"\"\" minutes = math.ceil((recording.ends[-1] -", "26)) start = 0 while start < len(self.signal): end = start + blockSize", "it can be slow.\"\"\" filepath = os.path.join(self.root, '{0}.wav'.format(self.recording_id)) self.samplerate, self.signal = wavfile.read(filepath) self.duration", "a time series and a histogram of segment durations, optionally filtered for a", "\"\"\"Construct a new Recording by reading the ITS file in the directory root", "fig = pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(starts[1:], intervals) pyplot.title('Vocalization Intervals for {0}'.format(speaker)) pyplot.xlabel('Time", "the sample rate and signal.\"\"\" filename = os.path.join(self.root, self.recording_id, category, '{0}.wav'.format(i)) return wavfile.read(filename)", "to split and save out individual segments as WAV files for acoustic analysis.\"\"\"", "self.speakers = speakers self.samplerate = None self.signal = None self.duration = None def", "i, starts, ends = recording.filter_speaker(speaker) durations = ends - starts fig = pyplot.figure()", "to the identified speaker.\"\"\" recording_dir = os.path.join(self.root, self.recording_id) if not os.path.exists(recording_dir): os.makedirs(recording_dir) for", "speaker is None else speaker)) pyplot.xlabel('Time (s)') pyplot.ylabel('Duration (s)') pyplot.subplot(2, 1, 2) pyplot.hist(durations,", "signal.\"\"\" filename = 
os.path.join(self.root, self.recording_id, category, '{0}.wav'.format(i)) return wavfile.read(filename) def filter_speaker(self, speaker): \"\"\"Return", "exported from LENA and parses out data about the segments and speakers in", "recording.starts ends = recording.ends else: i, starts, ends = recording.filter_speaker(speaker) durations = ends", "self.starts = numpy.array(starts) self.ends = numpy.array(ends) self.speakers = speakers self.samplerate = None self.signal", "start), 0) volubility /= 60 fig = pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(60 *", "of Vocalizations by Speaker') pyplot.xticks(numpy.arange(len(speakers)) + 0.5, speakers) pyplot.xlim(0, len(speakers)) pyplot.xlabel('Speaker') pyplot.ylabel('Count') return", "recording.filter_speaker(speaker) for m in range(minutes): start_minute = 60 * m end_minute = 60", "if not os.path.exists(recording_dir): os.makedirs(recording_dir) for speaker in set(self.speakers): speaker_dir = os.path.join(recording_dir, speaker) if", "individual segments as WAV files for acoustic analysis.\"\"\" def __init__(self, root, recording_id): \"\"\"Construct", "else speaker)) pyplot.xlabel('Time (s)') pyplot.ylabel('Duration (s)') pyplot.subplot(2, 1, 2) pyplot.hist(durations, bins=numpy.logspace(0, 4, 100))", "to aggregate segments.\"\"\" minutes = math.ceil((recording.ends[-1] - recording.starts[0]) / 60) volubility = numpy.zeros(minutes)", "50)) pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Interval (s)') pyplot.ylabel('Count') return fig def plot_volubility(recording, speaker): \"\"\"Plot the", "files for acoustic analysis.\"\"\" def __init__(self, root, recording_id): \"\"\"Construct a new Recording by", "math from xml.etree import ElementTree from scipy.io import wavfile from matplotlib import pyplot", "speaker.\"\"\" index = numpy.array(self.speakers) == speaker return numpy.where(index)[0], self.starts[index], self.ends[index] def parse_time(formatted): \"\"\"Returns", "durations / 2, durations) 
def plot_durations(recording, speaker=None):
    """Plot segment durations, optionally filtered for a speaker.

    Top panel: duration of each segment plotted at its midpoint in time.
    Bottom panel: log-log histogram of durations. Returns the figure.
    """
    if speaker is None:
        starts, ends = recording.starts, recording.ends
    else:
        _, starts, ends = recording.filter_speaker(speaker)
    durations = ends - starts
    midpoints = starts + durations / 2
    label = 'ALL' if speaker is None else speaker
    fig = pyplot.figure()
    pyplot.subplot(2, 1, 1)
    pyplot.plot(midpoints, durations)
    pyplot.title('Vocalization Durations for {0}'.format(label))
    pyplot.xlabel('Time (s)')
    pyplot.ylabel('Duration (s)')
    pyplot.subplot(2, 1, 2)
    pyplot.hist(durations, bins=numpy.logspace(0, 4, 100))
    pyplot.xscale('log')
    pyplot.yscale('log')
    pyplot.xlabel('Duration (s)')
    pyplot.ylabel('Count')
    return fig
This analysis uses one minute blocks", "self.signal[int(start * self.samplerate):int(end * self.samplerate)] wavfile.write(os.path.join(recording_dir, speaker, '{0}.wav'.format(i)), self.samplerate, segment) def read_segment(self, category,", "pyplot.ylabel('Count') return fig def plot_intervals(recording, speaker): \"\"\"Plot a time series and histogram of", "= self.signal[int(start * self.samplerate):int(end * self.samplerate)] wavfile.write(os.path.join(recording_dir, speaker, '{0}.wav'.format(i)), self.samplerate, segment) def read_segment(self,", "None: starts = recording.starts ends = recording.ends else: i, starts, ends = recording.filter_speaker(speaker)", "pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Duration (s)') pyplot.ylabel('Count') return fig def plot_intervals(recording, speaker): \"\"\"Plot a time", "times of all segments labeled with the speaker.\"\"\" index = numpy.array(self.speakers) == speaker", "save out individual segments as WAV files for acoustic analysis.\"\"\" def __init__(self, root,", "return fig def plot_volubility(recording, speaker): \"\"\"Plot the volubility ratio (proportion of time that", "pyplot.title('Volubility for {0}'.format(speaker)) pyplot.xlabel('Time (min)') pyplot.ylabel('Vocalized Seconds / Minute') pyplot.subplot(2, 1, 2) pyplot.hist(volubility,", "volubility ratio (proportion of time that speaker is speaking) as a time series", "reading the ITS file in the directory root with a filename derived from", "contains a method to split and save out individual segments as WAV files", "not require Pacific timezone, lookup lena format spec if formatted.startswith('PT') and formatted.endswith('S'): return", "= numpy.array(starts) self.ends = numpy.array(ends) self.speakers = speakers self.samplerate = None self.signal =", "import os import math from xml.etree import ElementTree from scipy.io import wavfile from", "start = 0 while start < len(self.signal): end = start + blockSize *", "def 
plot_speaker_counts(recording): \"\"\"Plot the number of segments in the recording for each speaker.\"\"\"", "out data about the segments and speakers in the corresponding WAV file. It", "zip(self.starts, self.ends, self.speakers, range(len(self.starts))): segment = self.signal[int(start * self.samplerate):int(end * self.samplerate)] wavfile.write(os.path.join(recording_dir, speaker,", "numpy.zeros(minutes) i, starts, ends = recording.filter_speaker(speaker) for m in range(minutes): start_minute = 60", "pyplot.plot(starts + durations / 2, durations) pyplot.title('Vocalization Durations for {0}'.format('ALL' if speaker is", "ElementTree.parse(os.path.join(self.root, '{0}.its'.format(self.recording_id))) root = tree.getroot() for segment in root.iter('Segment'): speakers.append(segment.attrib['spkr']) starts.append(parse_time(segment.attrib['startTime'])) ends.append(parse_time(segment.attrib['endTime'])) self.starts", "logfbank class Recording: \"\"\"Recording reads an ITS file exported from LENA and parses", "numpy.array(self.speakers) == speaker return numpy.where(index)[0], self.starts[index], self.ends[index] def parse_time(formatted): \"\"\"Returns the time in", "= tree.getroot() for segment in root.iter('Segment'): speakers.append(segment.attrib['spkr']) starts.append(parse_time(segment.attrib['startTime'])) ends.append(parse_time(segment.attrib['endTime'])) self.starts = numpy.array(starts) self.ends", "is None: self.read_recording() for start, end, speaker, i in zip(self.starts, self.ends, self.speakers, range(len(self.starts))):", "durations, optionally filtered for a speaker.\"\"\" if speaker is None: starts = recording.starts", "math.ceil((recording.ends[-1] - recording.starts[0]) / 60) volubility = numpy.zeros(minutes) i, starts, ends = recording.filter_speaker(speaker)", "time in seconds indicated by the formatted string.\"\"\" # TODO: This should not", "and speakers in the corresponding WAV file. 
It also contains a method to", "wavfile.write(os.path.join(recording_dir, speaker, '{0}.wav'.format(i)), self.samplerate, segment) def read_segment(self, category, i): \"\"\"Read an individual segment", "read_recording(self): \"\"\"Read the WAV file corresponding to this Recording. This is deferred because", "plot_intervals(recording, speaker): \"\"\"Plot a time series and histogram of segment intervals labeled as", "= pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(starts[1:], intervals) pyplot.title('Vocalization Intervals for {0}'.format(speaker)) pyplot.xlabel('Time (s)')", "this Recording. This is deferred because it can be slow.\"\"\" filepath = os.path.join(self.root,", "= start + blockSize * self.samplerate end = end if end < len(self.signal)", "and end times of all segments labeled with the speaker.\"\"\" index = numpy.array(self.speakers)", "1, 26)))) start = end return fbanks def split_segments(self): \"\"\"Split the WAV file", "a histogram of segment durations, optionally filtered for a speaker.\"\"\" if speaker is", "\"\"\"Split the WAV file for this recording into individual segments and save those", "a time series and histogram. 
This analysis uses one minute blocks to aggregate", "self.speakers, range(len(self.starts))): segment = self.signal[int(start * self.samplerate):int(end * self.samplerate)] wavfile.write(os.path.join(recording_dir, speaker, '{0}.wav'.format(i)), self.samplerate,", "pyplot.bar(numpy.arange(len(speakers)) + 0.1, counts) pyplot.title('Number of Vocalizations by Speaker') pyplot.xticks(numpy.arange(len(speakers)) + 0.5, speakers)", "1, 1) pyplot.plot(starts + durations / 2, durations) pyplot.title('Vocalization Durations for {0}'.format('ALL' if", "speaker.\"\"\" speakers, counts = numpy.unique(recording.speakers, return_counts=True) fig = pyplot.figure() pyplot.bar(numpy.arange(len(speakers)) + 0.1, counts)", "* m end_minute = 60 * m + 60 for start, end in", "volubility) pyplot.title('Volubility for {0}'.format(speaker)) pyplot.xlabel('Time (min)') pyplot.ylabel('Vocalized Seconds / Minute') pyplot.subplot(2, 1, 2)", "= numpy.zeros((0, 1, 26)) start = 0 while start < len(self.signal): end =", "the speaker.\"\"\" index = numpy.array(self.speakers) == speaker return numpy.where(index)[0], self.starts[index], self.ends[index] def parse_time(formatted):", "= numpy.array(ends) self.speakers = speakers self.samplerate = None self.signal = None self.duration =", "60 * m + 60 for start, end in zip(starts, ends): volubility[m] +=", "pyplot.subplot(2, 1, 1) pyplot.plot(starts + durations / 2, durations) pyplot.title('Vocalization Durations for {0}'.format('ALL'", "for each speaker.\"\"\" speakers, counts = numpy.unique(recording.speakers, return_counts=True) fig = pyplot.figure() pyplot.bar(numpy.arange(len(speakers)) +", "features import logfbank class Recording: \"\"\"Recording reads an ITS file exported from LENA", "if not os.path.exists(speaker_dir): os.makedirs(speaker_dir) if self.signal is None: self.read_recording() for start, end, speaker,", "reads an ITS file exported from LENA and parses out data about the", "<filename>ivfcrvis/recording.py import numpy import os 
import math from xml.etree import ElementTree from scipy.io", "the corresponding WAV file. It also contains a method to split and save", "in set(self.speakers): speaker_dir = os.path.join(recording_dir, speaker) if not os.path.exists(speaker_dir): os.makedirs(speaker_dir) if self.signal is", "speaker): \"\"\"Return the indices, start times, and end times of all segments labeled", "file. It also contains a method to split and save out individual segments", "is None else speaker)) pyplot.xlabel('Time (s)') pyplot.ylabel('Duration (s)') pyplot.subplot(2, 1, 2) pyplot.hist(durations, bins=numpy.logspace(0,", "starts, ends = recording.filter_speaker(speaker) for m in range(minutes): start_minute = 60 * m", "len(speakers)) pyplot.xlabel('Speaker') pyplot.ylabel('Count') return fig def plot_durations(recording, speaker=None): \"\"\"Plot a time series and", "pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(starts[1:], intervals) pyplot.title('Vocalization Intervals for {0}'.format(speaker)) pyplot.xlabel('Time (s)') pyplot.ylabel('Interval", "end return fbanks def split_segments(self): \"\"\"Split the WAV file for this recording into", "__init__(self, root, recording_id): \"\"\"Construct a new Recording by reading the ITS file in", "* m + 60 for start, end in zip(starts, ends): volubility[m] += max(min(end_minute,", "an ITS file exported from LENA and parses out data about the segments", "pyplot.subplot(2, 1, 2) pyplot.hist(intervals, bins=numpy.logspace(0, 4, 50)) pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Interval (s)') pyplot.ylabel('Count') return", "a filename derived from recording_id.\"\"\" self.root = root self.recording_id = recording_id starts =", "start = end return fbanks def split_segments(self): \"\"\"Split the WAV file for this", "end in zip(starts, ends): volubility[m] += max(min(end_minute, end) - max(start_minute, start), 0) volubility", "\"\"\"Plot the volubility ratio (proportion of time that speaker is speaking) as a", "the ITS file in 
the directory root with a filename derived from recording_id.\"\"\"", "0 while start < len(self.signal): end = start + blockSize * self.samplerate end", "'{0}.wav'.format(i)) return wavfile.read(filename) def filter_speaker(self, speaker): \"\"\"Return the indices, start times, and end", "speakers self.samplerate = None self.signal = None self.duration = None def read_recording(self): \"\"\"Read", "pyplot.title('Vocalization Durations for {0}'.format('ALL' if speaker is None else speaker)) pyplot.xlabel('Time (s)') pyplot.ylabel('Duration", "i in zip(self.starts, self.ends, self.speakers, range(len(self.starts))): segment = self.signal[int(start * self.samplerate):int(end * self.samplerate)]", "pyplot.hist(intervals, bins=numpy.logspace(0, 4, 50)) pyplot.xscale('log') pyplot.yscale('log') pyplot.xlabel('Interval (s)') pyplot.ylabel('Count') return fig def plot_volubility(recording,", "in zip(self.starts, self.ends, self.speakers, range(len(self.starts))): segment = self.signal[int(start * self.samplerate):int(end * self.samplerate)] wavfile.write(os.path.join(recording_dir,", "# TODO: This should not require Pacific timezone, lookup lena format spec if", "speaker): \"\"\"Plot a time series and histogram of segment intervals labeled as speaker.\"\"\"", "counts) pyplot.title('Number of Vocalizations by Speaker') pyplot.xticks(numpy.arange(len(speakers)) + 0.5, speakers) pyplot.xlim(0, len(speakers)) pyplot.xlabel('Speaker')", "= 60 * m end_minute = 60 * m + 60 for start,", "+= max(min(end_minute, end) - max(start_minute, start), 0) volubility /= 60 fig = pyplot.figure()", "identified speaker.\"\"\" recording_dir = os.path.join(self.root, self.recording_id) if not os.path.exists(recording_dir): os.makedirs(recording_dir) for speaker in", "segment intervals labeled as speaker.\"\"\" i, starts, ends = recording.filter_speaker(speaker) intervals = starts[1:]", "for segment in root.iter('Segment'): speakers.append(segment.attrib['spkr']) 
starts.append(parse_time(segment.attrib['startTime'])) ends.append(parse_time(segment.attrib['endTime'])) self.starts = numpy.array(starts) self.ends = numpy.array(ends)", "self.samplerate)] wavfile.write(os.path.join(recording_dir, speaker, '{0}.wav'.format(i)), self.samplerate, segment) def read_segment(self, category, i): \"\"\"Read an individual", "tree.getroot() for segment in root.iter('Segment'): speakers.append(segment.attrib['spkr']) starts.append(parse_time(segment.attrib['startTime'])) ends.append(parse_time(segment.attrib['endTime'])) self.starts = numpy.array(starts) self.ends =", "os.makedirs(recording_dir) for speaker in set(self.speakers): speaker_dir = os.path.join(recording_dir, speaker) if not os.path.exists(speaker_dir): os.makedirs(speaker_dir)", "speaker in set(self.speakers): speaker_dir = os.path.join(recording_dir, speaker) if not os.path.exists(speaker_dir): os.makedirs(speaker_dir) if self.signal", "ends): volubility[m] += max(min(end_minute, end) - max(start_minute, start), 0) volubility /= 60 fig", "= pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(60 * numpy.arange(minutes), volubility) pyplot.title('Volubility for {0}'.format(speaker)) pyplot.xlabel('Time", "import math from xml.etree import ElementTree from scipy.io import wavfile from matplotlib import", "'{0}.wav'.format(self.recording_id)) self.samplerate, self.signal = wavfile.read(filepath) self.duration = len(self.signal) / self.samplerate def frequency_banks(self, blockSize=600):", "= os.path.join(self.root, '{0}.wav'.format(self.recording_id)) self.samplerate, self.signal = wavfile.read(filepath) self.duration = len(self.signal) / self.samplerate def", "individual segment WAV file. 
Returns the sample rate and signal.\"\"\" filename = os.path.join(self.root,", "speakers) pyplot.xlim(0, len(speakers)) pyplot.xlabel('Speaker') pyplot.ylabel('Count') return fig def plot_durations(recording, speaker=None): \"\"\"Plot a time", "= recording.filter_speaker(speaker) durations = ends - starts fig = pyplot.figure() pyplot.subplot(2, 1, 1)", "= pyplot.figure() pyplot.bar(numpy.arange(len(speakers)) + 0.1, counts) pyplot.title('Number of Vocalizations by Speaker') pyplot.xticks(numpy.arange(len(speakers)) +", "of segment durations, optionally filtered for a speaker.\"\"\" if speaker is None: starts", "from LENA and parses out data about the segments and speakers in the", "structure according to the identified speaker.\"\"\" recording_dir = os.path.join(self.root, self.recording_id) if not os.path.exists(recording_dir):", "and save those segments in a directory structure according to the identified speaker.\"\"\"", "filter_speaker(self, speaker): \"\"\"Return the indices, start times, and end times of all segments", "\"\"\"Recording reads an ITS file exported from LENA and parses out data about", "i): \"\"\"Read an individual segment WAV file. Returns the sample rate and signal.\"\"\"", "= 60 * m + 60 for start, end in zip(starts, ends): volubility[m]", "return float(formatted[2:-1]) def plot_speaker_counts(recording): \"\"\"Plot the number of segments in the recording for", "import numpy import os import math from xml.etree import ElementTree from scipy.io import", "the directory root with a filename derived from recording_id.\"\"\" self.root = root self.recording_id", "segment WAV file. 
Returns the sample rate and signal.\"\"\" filename = os.path.join(self.root, self.recording_id,", "/ 2, durations) pyplot.title('Vocalization Durations for {0}'.format('ALL' if speaker is None else speaker))", "= wavfile.read(filepath) self.duration = len(self.signal) / self.samplerate def frequency_banks(self, blockSize=600): if self.signal is", "pyplot.figure() pyplot.subplot(2, 1, 1) pyplot.plot(60 * numpy.arange(minutes), volubility) pyplot.title('Volubility for {0}'.format(speaker)) pyplot.xlabel('Time (min)')", "corresponding WAV file. It also contains a method to split and save out", "pyplot.ylabel('Vocalized Seconds / Minute') pyplot.subplot(2, 1, 2) pyplot.hist(volubility, bins=50) pyplot.yscale('log') pyplot.xlabel('Volubility') pyplot.ylabel('Count') return", "blockSize=600): if self.signal is None: self.read_recording() fbanks = numpy.zeros((0, 1, 26)) start =", "not os.path.exists(speaker_dir): os.makedirs(speaker_dir) if self.signal is None: self.read_recording() for start, end, speaker, i", "max(min(end_minute, end) - max(start_minute, start), 0) volubility /= 60 fig = pyplot.figure() pyplot.subplot(2,", "according to the identified speaker.\"\"\" recording_dir = os.path.join(self.root, self.recording_id) if not os.path.exists(recording_dir): os.makedirs(recording_dir)", "* self.samplerate):int(end * self.samplerate)] wavfile.write(os.path.join(recording_dir, speaker, '{0}.wav'.format(i)), self.samplerate, segment) def read_segment(self, category, i):", "end) - max(start_minute, start), 0) volubility /= 60 fig = pyplot.figure() pyplot.subplot(2, 1,", "the number of segments in the recording for each speaker.\"\"\" speakers, counts =", "not os.path.exists(recording_dir): os.makedirs(recording_dir) for speaker in set(self.speakers): speaker_dir = os.path.join(recording_dir, speaker) if not", "+ blockSize * self.samplerate end = end if end < len(self.signal) else len(self.signal)", "= recording.filter_speaker(speaker) for m in range(minutes): 
def parse_time(formatted):
    """Return the time in seconds indicated by the formatted duration string.

    LENA ITS files encode segment offsets as ISO-8601-style durations,
    e.g. 'PT123.45S'.

    Raises:
        ValueError: if the string is not in the expected 'PT<seconds>S' form.
    """
    # TODO: This should not require Pacific timezone, lookup lena format spec
    if formatted.startswith('PT') and formatted.endswith('S'):
        return float(formatted[2:-1])
    # Previously an unrecognized format fell through and returned None, which
    # silently propagated into the start/end arrays; fail loudly instead.
    raise ValueError('Unrecognized LENA time format: {0!r}'.format(formatted))
class Recording:
    """Recording reads an ITS file exported from LENA and parses out data about
    the segments and speakers in the corresponding WAV file.

    It also contains a method to split and save out individual segments as WAV
    files for acoustic analysis.
    """

    def __init__(self, root, recording_id):
        """Construct a new Recording by reading the ITS file in the directory
        root with a filename derived from recording_id."""
        self.root = root
        self.recording_id = recording_id
        starts = []
        ends = []
        speakers = []
        tree = ElementTree.parse(
            os.path.join(self.root, '{0}.its'.format(self.recording_id)))
        # Local renamed from `root` so it no longer shadows the directory
        # parameter of the same name.
        its_root = tree.getroot()
        for segment in its_root.iter('Segment'):
            speakers.append(segment.attrib['spkr'])
            starts.append(parse_time(segment.attrib['startTime']))
            ends.append(parse_time(segment.attrib['endTime']))
        self.starts = numpy.array(starts)
        self.ends = numpy.array(ends)
        self.speakers = speakers
        # WAV data is loaded lazily by read_recording().
        self.samplerate = None
        self.signal = None
        self.duration = None

    def read_recording(self):
        """Read the WAV file corresponding to this Recording.

        This is deferred because it can be slow."""
        filepath = os.path.join(self.root, '{0}.wav'.format(self.recording_id))
        self.samplerate, self.signal = wavfile.read(filepath)
        self.duration = len(self.signal) / self.samplerate

    def frequency_banks(self, blockSize=600):
        """Compute log Mel filterbank features over the whole recording.

        The signal is processed in blocks of blockSize seconds to bound the
        size of each logfbank call.  Returns an array of shape
        (frames, 1, 26) — 26 is logfbank's default filter count and must stay
        in sync with the reshape below.
        """
        if self.signal is None:
            self.read_recording()
        step = blockSize * self.samplerate  # samples per block
        banks = []
        start = 0
        while start < len(self.signal):
            end = min(start + step, len(self.signal))
            fbank = logfbank(self.signal[start:end], self.samplerate,
                             winlen=0.05, winstep=0.025)
            banks.append(numpy.reshape(fbank, (len(fbank), 1, 26)))
            start = end
        # Concatenate once at the end; re-concatenating the accumulated array
        # on every iteration (as before) copies quadratically in block count.
        if not banks:
            return numpy.zeros((0, 1, 26))
        return numpy.concatenate(banks)

    def split_segments(self):
        """Split the WAV file for this recording into individual segments and
        save those segments in a directory structure according to the
        identified speaker."""
        recording_dir = os.path.join(self.root, self.recording_id)
        if not os.path.exists(recording_dir):
            os.makedirs(recording_dir)
        for speaker in set(self.speakers):
            speaker_dir = os.path.join(recording_dir, speaker)
            if not os.path.exists(speaker_dir):
                os.makedirs(speaker_dir)
        if self.signal is None:
            self.read_recording()
        # enumerate() replaces the original zip(..., range(len(...))) idiom.
        for i, (start, end, speaker) in enumerate(
                zip(self.starts, self.ends, self.speakers)):
            segment = self.signal[int(start * self.samplerate):
                                  int(end * self.samplerate)]
            wavfile.write(
                os.path.join(recording_dir, speaker, '{0}.wav'.format(i)),
                self.samplerate, segment)

    def read_segment(self, category, i):
        """Read an individual segment WAV file.

        Returns the sample rate and signal."""
        filename = os.path.join(self.root, self.recording_id, category,
                                '{0}.wav'.format(i))
        return wavfile.read(filename)

    def filter_speaker(self, speaker):
        """Return the indices, start times, and end times of all segments
        labeled with the speaker."""
        index = numpy.array(self.speakers) == speaker
        return numpy.where(index)[0], self.starts[index], self.ends[index]
def plot_volubility(recording, speaker):
    """Plot the volubility ratio (proportion of time that speaker is speaking)
    as a time series and histogram.

    This analysis uses one minute blocks to aggregate segments.
    """
    # NOTE(review): minute windows start at t=0 but the window count derives
    # from ends[-1] - starts[0]; if the recording does not start near t=0 the
    # tail may be truncated — confirm against the ITS time origin.
    minutes = math.ceil((recording.ends[-1] - recording.starts[0]) / 60)
    volubility = numpy.zeros(minutes)
    _, starts, ends = recording.filter_speaker(speaker)
    for m in range(minutes):
        start_minute = 60 * m
        end_minute = start_minute + 60
        # Vectorized overlap of every segment with this minute window; the
        # original accumulated the same sum in a Python loop per segment.
        overlap = numpy.minimum(end_minute, ends) - numpy.maximum(start_minute, starts)
        volubility[m] = numpy.clip(overlap, 0, None).sum()
    volubility /= 60  # seconds spoken -> fraction of the minute
    fig = pyplot.figure()
    pyplot.subplot(2, 1, 1)
    pyplot.plot(60 * numpy.arange(minutes), volubility)
    pyplot.title('Volubility for {0}'.format(speaker))
    pyplot.xlabel('Time (min)')
    pyplot.ylabel('Vocalized Seconds / Minute')
    pyplot.subplot(2, 1, 2)
    pyplot.hist(volubility, bins=50)
    pyplot.yscale('log')
    pyplot.xlabel('Volubility')
    pyplot.ylabel('Count')
    return fig
def plot_speaker_counts(recording):
    """Plot the number of segments in the recording for each speaker."""
    labels, tallies = numpy.unique(recording.speakers, return_counts=True)
    positions = numpy.arange(len(labels))
    fig = pyplot.figure()
    pyplot.bar(positions + 0.1, tallies)
    pyplot.title('Number of Vocalizations by Speaker')
    pyplot.xticks(positions + 0.5, labels)
    pyplot.xlim(0, len(labels))
    pyplot.xlabel('Speaker')
    pyplot.ylabel('Count')
    return fig
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "\"LaunchConfigurationName\": self.resource.name, \"ImageId\": \"ami-cba130bc\", \"InstanceType\": \"t2.micro\", \"CreatedTime\": datetime.datetime.now(), } if user_data: launch_config[\"UserData\"] =", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "# limitations under the License. import base64 import datetime from touchdown.core.utils import force_bytes,", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "import datetime from touchdown.core.utils import force_bytes, force_str from .service import ServiceStubber class LaunchConfigurationStubber(ServiceStubber):", "force_str( base64.b64encode(force_bytes(user_data)) ) return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": [launch_config]}, expected_params={}, ) def add_describe_auto_scaling_groups(self): return", "self.add_response( \"describe_auto_scaling_groups\", service_response={\"AutoScalingGroups\": []}, expected_params={}, ) def add_create_launch_configuration(self, user_data=None): expected_params = { \"ImageId\":", "def add_create_launch_configuration(self, user_data=None): expected_params = { \"ImageId\": \"ami-cba130bc\", \"InstanceMonitoring\": {\"Enabled\": False}, \"InstanceType\": \"t2.micro\",", "License. 
# You may obtain a copy of the License at # #", "expected_params = { \"ImageId\": \"ami-cba130bc\", \"InstanceMonitoring\": {\"Enabled\": False}, \"InstanceType\": \"t2.micro\", \"LaunchConfigurationName\": \"my-test-lc.1\", }", "self.resource.name, \"ImageId\": \"ami-cba130bc\", \"InstanceType\": \"t2.micro\", \"CreatedTime\": datetime.datetime.now(), } if user_data: launch_config[\"UserData\"] = force_str(", "specific language governing permissions and # limitations under the License. import base64 import", "force_bytes, force_str from .service import ServiceStubber class LaunchConfigurationStubber(ServiceStubber): client_service = \"ec2\" def add_describe_launch_configurations_empty_response(self):", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "compliance with the License. # You may obtain a copy of the License", "def add_describe_launch_configurations_one_response(self, user_data=None): launch_config = { \"LaunchConfigurationName\": self.resource.name, \"ImageId\": \"ami-cba130bc\", \"InstanceType\": \"t2.micro\", \"CreatedTime\":", "2016 Isotoma Limited # # Licensed under the Apache License, Version 2.0 (the", "client_service = \"ec2\" def add_describe_launch_configurations_empty_response(self): return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": []}, expected_params={}, ) def", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. 
# You may obtain a", "Limited # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "\"InstanceMonitoring\": {\"Enabled\": False}, \"InstanceType\": \"t2.micro\", \"LaunchConfigurationName\": \"my-test-lc.1\", } if user_data: expected_params[\"UserData\"] = user_data", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "language governing permissions and # limitations under the License. import base64 import datetime", "= force_str( base64.b64encode(force_bytes(user_data)) ) return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": [launch_config]}, expected_params={}, ) def add_describe_auto_scaling_groups(self):", "return self.add_response( \"create_launch_configuration\", service_response={}, expected_params=expected_params, ) def add_delete_launch_configuration(self): return self.add_response( \"delete_launch_configuration\", service_response={}, expected_params={\"LaunchConfigurationName\":", "you may not use this file except in compliance with the License. #", "for the specific language governing permissions and # limitations under the License. import", "expected_params={}, ) def add_create_launch_configuration(self, user_data=None): expected_params = { \"ImageId\": \"ami-cba130bc\", \"InstanceMonitoring\": {\"Enabled\": False},", "\"LaunchConfigurationName\": \"my-test-lc.1\", } if user_data: expected_params[\"UserData\"] = user_data return self.add_response( \"create_launch_configuration\", service_response={}, expected_params=expected_params,", "License. 
import base64 import datetime from touchdown.core.utils import force_bytes, force_str from .service import", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", ") def add_describe_launch_configurations_one_response(self, user_data=None): launch_config = { \"LaunchConfigurationName\": self.resource.name, \"ImageId\": \"ami-cba130bc\", \"InstanceType\": \"t2.micro\",", "def add_describe_auto_scaling_groups(self): return self.add_response( \"describe_auto_scaling_groups\", service_response={\"AutoScalingGroups\": []}, expected_params={}, ) def add_create_launch_configuration(self, user_data=None): expected_params", "add_create_launch_configuration(self, user_data=None): expected_params = { \"ImageId\": \"ami-cba130bc\", \"InstanceMonitoring\": {\"Enabled\": False}, \"InstanceType\": \"t2.micro\", \"LaunchConfigurationName\":", "touchdown.core.utils import force_bytes, force_str from .service import ServiceStubber class LaunchConfigurationStubber(ServiceStubber): client_service = \"ec2\"", "user_data=None): expected_params = { \"ImageId\": \"ami-cba130bc\", \"InstanceMonitoring\": {\"Enabled\": False}, \"InstanceType\": \"t2.micro\", \"LaunchConfigurationName\": \"my-test-lc.1\",", "expected_params[\"UserData\"] = user_data return self.add_response( \"create_launch_configuration\", service_response={}, expected_params=expected_params, ) def add_delete_launch_configuration(self): return self.add_response(", "ANY KIND, either express or implied. 
# See the License for the specific", "[launch_config]}, expected_params={}, ) def add_describe_auto_scaling_groups(self): return self.add_response( \"describe_auto_scaling_groups\", service_response={\"AutoScalingGroups\": []}, expected_params={}, ) def", "import ServiceStubber class LaunchConfigurationStubber(ServiceStubber): client_service = \"ec2\" def add_describe_launch_configurations_empty_response(self): return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\":", "from .service import ServiceStubber class LaunchConfigurationStubber(ServiceStubber): client_service = \"ec2\" def add_describe_launch_configurations_empty_response(self): return self.add_response(", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "{ \"ImageId\": \"ami-cba130bc\", \"InstanceMonitoring\": {\"Enabled\": False}, \"InstanceType\": \"t2.micro\", \"LaunchConfigurationName\": \"my-test-lc.1\", } if user_data:", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "\"describe_auto_scaling_groups\", service_response={\"AutoScalingGroups\": []}, expected_params={}, ) def add_create_launch_configuration(self, user_data=None): expected_params = { \"ImageId\": \"ami-cba130bc\",", "use this file except in compliance with the License. 
# You may obtain", "def add_describe_launch_configurations_empty_response(self): return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": []}, expected_params={}, ) def add_describe_launch_configurations_one_response(self, user_data=None): launch_config", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": [launch_config]}, expected_params={}, ) def add_describe_auto_scaling_groups(self): return self.add_response( \"describe_auto_scaling_groups\", service_response={\"AutoScalingGroups\":", "not use this file except in compliance with the License. # You may", "\"describe_launch_configurations\", service_response={\"LaunchConfigurations\": [launch_config]}, expected_params={}, ) def add_describe_auto_scaling_groups(self): return self.add_response( \"describe_auto_scaling_groups\", service_response={\"AutoScalingGroups\": []}, expected_params={},", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "= user_data return self.add_response( \"create_launch_configuration\", service_response={}, expected_params=expected_params, ) def add_delete_launch_configuration(self): return self.add_response( \"delete_launch_configuration\",", "[]}, expected_params={}, ) def add_create_launch_configuration(self, user_data=None): expected_params = { \"ImageId\": \"ami-cba130bc\", \"InstanceMonitoring\": {\"Enabled\":", "Isotoma Limited # # Licensed under the Apache License, Version 2.0 (the \"License\");", "See the License for the specific language governing permissions and # limitations under", "\"InstanceType\": \"t2.micro\", \"LaunchConfigurationName\": \"my-test-lc.1\", } if user_data: expected_params[\"UserData\"] = user_data return self.add_response( \"create_launch_configuration\",", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": []}, expected_params={}, ) def add_describe_launch_configurations_one_response(self, user_data=None): launch_config = { \"LaunchConfigurationName\":", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "launch_config = { \"LaunchConfigurationName\": self.resource.name, \"ImageId\": \"ami-cba130bc\", \"InstanceType\": \"t2.micro\", \"CreatedTime\": datetime.datetime.now(), } if", "\"create_launch_configuration\", service_response={}, expected_params=expected_params, ) def add_delete_launch_configuration(self): return self.add_response( \"delete_launch_configuration\", service_response={}, expected_params={\"LaunchConfigurationName\": self.resource.name}, )", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR 
CONDITIONS OF ANY", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "self.add_response( \"create_launch_configuration\", service_response={}, expected_params=expected_params, ) def add_delete_launch_configuration(self): return self.add_response( \"delete_launch_configuration\", service_response={}, expected_params={\"LaunchConfigurationName\": self.resource.name},", ") def add_create_launch_configuration(self, user_data=None): expected_params = { \"ImageId\": \"ami-cba130bc\", \"InstanceMonitoring\": {\"Enabled\": False}, \"InstanceType\":", "OF ANY KIND, either express or implied. # See the License for the", "2.0 (the \"License\"); # you may not use this file except in compliance", ".service import ServiceStubber class LaunchConfigurationStubber(ServiceStubber): client_service = \"ec2\" def add_describe_launch_configurations_empty_response(self): return self.add_response( \"describe_launch_configurations\",", "# you may not use this file except in compliance with the License.", "class LaunchConfigurationStubber(ServiceStubber): client_service = \"ec2\" def add_describe_launch_configurations_empty_response(self): return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": []}, expected_params={},", "ServiceStubber class LaunchConfigurationStubber(ServiceStubber): client_service = \"ec2\" def add_describe_launch_configurations_empty_response(self): return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": []},", "expected_params={}, ) def add_describe_auto_scaling_groups(self): return self.add_response( \"describe_auto_scaling_groups\", service_response={\"AutoScalingGroups\": []}, expected_params={}, ) def add_create_launch_configuration(self,", "user_data: launch_config[\"UserData\"] = force_str( base64.b64encode(force_bytes(user_data)) ) return self.add_response( \"describe_launch_configurations\", 
service_response={\"LaunchConfigurations\": [launch_config]}, expected_params={}, )", "agreed to in writing, software # distributed under the License is distributed on", "\"ami-cba130bc\", \"InstanceType\": \"t2.micro\", \"CreatedTime\": datetime.datetime.now(), } if user_data: launch_config[\"UserData\"] = force_str( base64.b64encode(force_bytes(user_data)) )", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "base64 import datetime from touchdown.core.utils import force_bytes, force_str from .service import ServiceStubber class", "add_describe_auto_scaling_groups(self): return self.add_response( \"describe_auto_scaling_groups\", service_response={\"AutoScalingGroups\": []}, expected_params={}, ) def add_create_launch_configuration(self, user_data=None): expected_params =", "(the \"License\"); # you may not use this file except in compliance with", "from touchdown.core.utils import force_bytes, force_str from .service import ServiceStubber class LaunchConfigurationStubber(ServiceStubber): client_service =", "} if user_data: launch_config[\"UserData\"] = force_str( base64.b64encode(force_bytes(user_data)) ) return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": [launch_config]},", "service_response={\"LaunchConfigurations\": [launch_config]}, expected_params={}, ) def add_describe_auto_scaling_groups(self): return self.add_response( \"describe_auto_scaling_groups\", service_response={\"AutoScalingGroups\": []}, expected_params={}, )", "# # Unless required by applicable law or agreed to in writing, software", "} if user_data: expected_params[\"UserData\"] = user_data return self.add_response( \"create_launch_configuration\", service_response={}, expected_params=expected_params, ) def", "express or implied. 
# See the License for the specific language governing permissions", "\"describe_launch_configurations\", service_response={\"LaunchConfigurations\": []}, expected_params={}, ) def add_describe_launch_configurations_one_response(self, user_data=None): launch_config = { \"LaunchConfigurationName\": self.resource.name,", "datetime.datetime.now(), } if user_data: launch_config[\"UserData\"] = force_str( base64.b64encode(force_bytes(user_data)) ) return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\":", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "except in compliance with the License. # You may obtain a copy of", "service_response={\"AutoScalingGroups\": []}, expected_params={}, ) def add_create_launch_configuration(self, user_data=None): expected_params = { \"ImageId\": \"ami-cba130bc\", \"InstanceMonitoring\":", "by applicable law or agreed to in writing, software # distributed under the", "= \"ec2\" def add_describe_launch_configurations_empty_response(self): return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": []}, expected_params={}, ) def add_describe_launch_configurations_one_response(self,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "user_data: expected_params[\"UserData\"] = user_data return self.add_response( \"create_launch_configuration\", service_response={}, expected_params=expected_params, ) def add_delete_launch_configuration(self): return", "return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": []}, expected_params={}, ) def add_describe_launch_configurations_one_response(self, user_data=None): launch_config = {", "either express or implied. 
# See the License for the specific language governing", "Copyright 2016 Isotoma Limited # # Licensed under the Apache License, Version 2.0", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "the specific language governing permissions and # limitations under the License. import base64", "base64.b64encode(force_bytes(user_data)) ) return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": [launch_config]}, expected_params={}, ) def add_describe_auto_scaling_groups(self): return self.add_response(", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "\"ami-cba130bc\", \"InstanceMonitoring\": {\"Enabled\": False}, \"InstanceType\": \"t2.micro\", \"LaunchConfigurationName\": \"my-test-lc.1\", } if user_data: expected_params[\"UserData\"] =", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", ") return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": [launch_config]}, expected_params={}, ) def add_describe_auto_scaling_groups(self): return self.add_response( \"describe_auto_scaling_groups\",", "launch_config[\"UserData\"] = force_str( base64.b64encode(force_bytes(user_data)) ) return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": [launch_config]}, expected_params={}, ) def", "file except in compliance with the License. 
# You may obtain a copy", "return self.add_response( \"describe_auto_scaling_groups\", service_response={\"AutoScalingGroups\": []}, expected_params={}, ) def add_create_launch_configuration(self, user_data=None): expected_params = {", "{ \"LaunchConfigurationName\": self.resource.name, \"ImageId\": \"ami-cba130bc\", \"InstanceType\": \"t2.micro\", \"CreatedTime\": datetime.datetime.now(), } if user_data: launch_config[\"UserData\"]", ") def add_describe_auto_scaling_groups(self): return self.add_response( \"describe_auto_scaling_groups\", service_response={\"AutoScalingGroups\": []}, expected_params={}, ) def add_create_launch_configuration(self, user_data=None):", "if user_data: expected_params[\"UserData\"] = user_data return self.add_response( \"create_launch_configuration\", service_response={}, expected_params=expected_params, ) def add_delete_launch_configuration(self):", "add_describe_launch_configurations_one_response(self, user_data=None): launch_config = { \"LaunchConfigurationName\": self.resource.name, \"ImageId\": \"ami-cba130bc\", \"InstanceType\": \"t2.micro\", \"CreatedTime\": datetime.datetime.now(),", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "\"my-test-lc.1\", } if user_data: expected_params[\"UserData\"] = user_data return self.add_response( \"create_launch_configuration\", service_response={}, expected_params=expected_params, )", "License for the specific language governing permissions and # limitations under the License.", "\"InstanceType\": \"t2.micro\", \"CreatedTime\": datetime.datetime.now(), } if user_data: launch_config[\"UserData\"] = force_str( base64.b64encode(force_bytes(user_data)) ) return", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "and # limitations under the License. 
import base64 import datetime from touchdown.core.utils import", "service_response={\"LaunchConfigurations\": []}, expected_params={}, ) def add_describe_launch_configurations_one_response(self, user_data=None): launch_config = { \"LaunchConfigurationName\": self.resource.name, \"ImageId\":", "\"t2.micro\", \"CreatedTime\": datetime.datetime.now(), } if user_data: launch_config[\"UserData\"] = force_str( base64.b64encode(force_bytes(user_data)) ) return self.add_response(", "the License. # You may obtain a copy of the License at #", "if user_data: launch_config[\"UserData\"] = force_str( base64.b64encode(force_bytes(user_data)) ) return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": [launch_config]}, expected_params={},", "user_data=None): launch_config = { \"LaunchConfigurationName\": self.resource.name, \"ImageId\": \"ami-cba130bc\", \"InstanceType\": \"t2.micro\", \"CreatedTime\": datetime.datetime.now(), }", "= { \"ImageId\": \"ami-cba130bc\", \"InstanceMonitoring\": {\"Enabled\": False}, \"InstanceType\": \"t2.micro\", \"LaunchConfigurationName\": \"my-test-lc.1\", } if", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "force_str from .service import ServiceStubber class LaunchConfigurationStubber(ServiceStubber): client_service = \"ec2\" def add_describe_launch_configurations_empty_response(self): return", "under the License. import base64 import datetime from touchdown.core.utils import force_bytes, force_str from", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. 
# See the License for the specific language governing permissions and #", "\"ImageId\": \"ami-cba130bc\", \"InstanceType\": \"t2.micro\", \"CreatedTime\": datetime.datetime.now(), } if user_data: launch_config[\"UserData\"] = force_str( base64.b64encode(force_bytes(user_data))", "self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": [launch_config]}, expected_params={}, ) def add_describe_auto_scaling_groups(self): return self.add_response( \"describe_auto_scaling_groups\", service_response={\"AutoScalingGroups\": []},", "\"License\"); # you may not use this file except in compliance with the", "LaunchConfigurationStubber(ServiceStubber): client_service = \"ec2\" def add_describe_launch_configurations_empty_response(self): return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": []}, expected_params={}, )", "datetime from touchdown.core.utils import force_bytes, force_str from .service import ServiceStubber class LaunchConfigurationStubber(ServiceStubber): client_service", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "= { \"LaunchConfigurationName\": self.resource.name, \"ImageId\": \"ami-cba130bc\", \"InstanceType\": \"t2.micro\", \"CreatedTime\": datetime.datetime.now(), } if user_data:", "limitations under the License. import base64 import datetime from touchdown.core.utils import force_bytes, force_str", "applicable law or agreed to in writing, software # distributed under the License", "[]}, expected_params={}, ) def add_describe_launch_configurations_one_response(self, user_data=None): launch_config = { \"LaunchConfigurationName\": self.resource.name, \"ImageId\": \"ami-cba130bc\",", "the License. 
import base64 import datetime from touchdown.core.utils import force_bytes, force_str from .service", "or agreed to in writing, software # distributed under the License is distributed", "\"CreatedTime\": datetime.datetime.now(), } if user_data: launch_config[\"UserData\"] = force_str( base64.b64encode(force_bytes(user_data)) ) return self.add_response( \"describe_launch_configurations\",", "import base64 import datetime from touchdown.core.utils import force_bytes, force_str from .service import ServiceStubber", "permissions and # limitations under the License. import base64 import datetime from touchdown.core.utils", "False}, \"InstanceType\": \"t2.micro\", \"LaunchConfigurationName\": \"my-test-lc.1\", } if user_data: expected_params[\"UserData\"] = user_data return self.add_response(", "or implied. # See the License for the specific language governing permissions and", "\"ImageId\": \"ami-cba130bc\", \"InstanceMonitoring\": {\"Enabled\": False}, \"InstanceType\": \"t2.micro\", \"LaunchConfigurationName\": \"my-test-lc.1\", } if user_data: expected_params[\"UserData\"]", "\"t2.micro\", \"LaunchConfigurationName\": \"my-test-lc.1\", } if user_data: expected_params[\"UserData\"] = user_data return self.add_response( \"create_launch_configuration\", service_response={},", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "add_describe_launch_configurations_empty_response(self): return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": []}, expected_params={}, ) def add_describe_launch_configurations_one_response(self, user_data=None): launch_config =", "expected_params={}, ) def add_describe_launch_configurations_one_response(self, user_data=None): launch_config = { \"LaunchConfigurationName\": self.resource.name, \"ImageId\": \"ami-cba130bc\", \"InstanceType\":", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "{\"Enabled\": False}, \"InstanceType\": \"t2.micro\", \"LaunchConfigurationName\": \"my-test-lc.1\", } if user_data: expected_params[\"UserData\"] = user_data return", "user_data return self.add_response( \"create_launch_configuration\", service_response={}, expected_params=expected_params, ) def add_delete_launch_configuration(self): return self.add_response( \"delete_launch_configuration\", service_response={},", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "\"ec2\" def add_describe_launch_configurations_empty_response(self): return self.add_response( \"describe_launch_configurations\", service_response={\"LaunchConfigurations\": []}, expected_params={}, ) def add_describe_launch_configurations_one_response(self, user_data=None):", "governing permissions and # limitations under the License. import base64 import datetime from", "import force_bytes, force_str from .service import ServiceStubber class LaunchConfigurationStubber(ServiceStubber): client_service = \"ec2\" def", "with the License. 
# You may obtain a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "# Copyright 2016 Isotoma Limited # # Licensed under the Apache License, Version", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "<reponame>musaibnazir/MixedPy num = 5 for i in range(0,num): for j in range(0,num-i-1): print(end=\"", "range(0,num): for j in range(0,num-i-1): print(end=\" \") for j in range(1,i+1): print(j,\" \",end=\"\")", "for i in range(0,num): for j in range(0,num-i-1): print(end=\" \") for j in", "5 for i in range(0,num): for j in range(0,num-i-1): print(end=\" \") for j", "in range(0,num): for j in range(0,num-i-1): print(end=\" \") for j in range(1,i+1): print(j,\"", "for j in range(0,num-i-1): print(end=\" \") for j in range(1,i+1): print(j,\" \",end=\"\") print()", "i in range(0,num): for j in range(0,num-i-1): print(end=\" \") for j in range(1,i+1):", "= 5 for i in range(0,num): for j in range(0,num-i-1): print(end=\" \") for", "num = 5 for i in range(0,num): for j in range(0,num-i-1): print(end=\" \")" ]
[ "0 intNum2 = 0 for i in num1: intNum1 = intNum1 * 10", "-> str: intNum1 = 0 intNum2 = 0 for i in num1: intNum1", "num1: intNum1 = intNum1 * 10 + int(i) for i in num2: intNum2", "+ int(i) for i in num2: intNum2 = intNum2 * 10 + int(i)", "= 0 intNum2 = 0 for i in num1: intNum1 = intNum1 *", "* 10 + int(i) for i in num2: intNum2 = intNum2 * 10", "intNum2 = intNum2 * 10 + int(i) result = str(intNum1 + intNum2) return", "for i in num2: intNum2 = intNum2 * 10 + int(i) result =", "10 + int(i) for i in num2: intNum2 = intNum2 * 10 +", "str, num2: str) -> str: intNum1 = 0 intNum2 = 0 for i", "str) -> str: intNum1 = 0 intNum2 = 0 for i in num1:", "def addStrings(self, num1: str, num2: str) -> str: intNum1 = 0 intNum2 =", "intNum2 = 0 for i in num1: intNum1 = intNum1 * 10 +", "= intNum1 * 10 + int(i) for i in num2: intNum2 = intNum2", "num1: str, num2: str) -> str: intNum1 = 0 intNum2 = 0 for", "0 for i in num1: intNum1 = intNum1 * 10 + int(i) for", "= intNum2 * 10 + int(i) result = str(intNum1 + intNum2) return result", "= 0 for i in num1: intNum1 = intNum1 * 10 + int(i)", "str: intNum1 = 0 intNum2 = 0 for i in num1: intNum1 =", "in num1: intNum1 = intNum1 * 10 + int(i) for i in num2:", "intNum1 = 0 intNum2 = 0 for i in num1: intNum1 = intNum1", "for i in num1: intNum1 = intNum1 * 10 + int(i) for i", "num2: str) -> str: intNum1 = 0 intNum2 = 0 for i in", "int(i) for i in num2: intNum2 = intNum2 * 10 + int(i) result", "Solution: def addStrings(self, num1: str, num2: str) -> str: intNum1 = 0 intNum2", "num2: intNum2 = intNum2 * 10 + int(i) result = str(intNum1 + intNum2)", "i in num2: intNum2 = intNum2 * 10 + int(i) result = str(intNum1", "<reponame>yuzhengcuhk/MyLeetcodeRecord<gh_stars>1-10 class Solution: def addStrings(self, num1: str, num2: str) -> str: intNum1 =", "addStrings(self, num1: str, num2: str) -> str: intNum1 = 0 intNum2 = 0", "in num2: intNum2 = intNum2 * 10 + int(i) result = str(intNum1 +", "intNum1 = intNum1 * 10 + int(i) for i in num2: intNum2 =", "class 
Solution: def addStrings(self, num1: str, num2: str) -> str: intNum1 = 0", "intNum1 * 10 + int(i) for i in num2: intNum2 = intNum2 *", "i in num1: intNum1 = intNum1 * 10 + int(i) for i in" ]
[ "dependencies to be installed along with the function\") @click.option('--memory', type=click.INT, help=\"Sets the memory", "timeout=timeout, layers=layers.split(',') if layers else [] ) if func.exists(): click.echo(f'lambdapool function {function_name} already", "\"\"\"Delete a function\"\"\" click.echo('=== Deleting lambdapool function ===') func = LambdaPoolFunction(function_name=function_name) func.delete() click.echo(f'===", "exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Succesfully created lambdapool function {function_name} ===')", "func['memory'], func['timeout'] ] ) click.echo(tabulate(rows, headers=['FUNCTION NAME', 'SIZE', 'WHEN', 'RUNTIME MEMORY (MB)', 'TIMEOUT", "type=click.INT, help=\"Sets the memory size of the function environment\") @click.option('--timeout', type=click.INT, help=\"Sets the", "def list(): \"\"\"List all deployed functions\"\"\" funcs = LambdaPoolFunction.list() funcs = sorted(funcs, key=lambda", "= LambdaPoolFunction.list() funcs = sorted(funcs, key=lambda x: x['last_updated'], reverse=True) rows = [] for", "function {function_name} ===') @cli.command() @click.argument('function_name', nargs=1) def delete(function_name): \"\"\"Delete a function\"\"\" click.echo('=== Deleting", "lambdapool import exceptions @click.group() def cli(): pass @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the", "of the function environment\") @click.option('--timeout', type=click.INT, help=\"Sets the timeout for the function in", "reverse=True) rows = [] for func in funcs: rows.append( [ func['function_name'], utils.convert_size(func['size']), utils.datestr(func['last_updated']),", "pass @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the dependencies to be installed along with", "(a maximum of 5) should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1, 
type=click.Path(exists=True)) def", "requirements, memory, timeout, layers): \"\"\"Update an existing function\"\"\" click.echo('=== Updating lambdapool function ===')", "= [] for func in funcs: rows.append( [ func['function_name'], utils.convert_size(func['size']), utils.datestr(func['last_updated']), func['memory'], func['timeout']", "@click.option('--timeout', type=click.INT, help=\"Sets the timeout for the function in seconds\") @click.option('--layers', help=\"Sets the", "'SIZE', 'WHEN', 'RUNTIME MEMORY (MB)', 'TIMEOUT (SEC)'])) @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the", "func in funcs: rows.append( [ func['function_name'], utils.convert_size(func['size']), utils.datestr(func['last_updated']), func['memory'], func['timeout'] ] ) click.echo(tabulate(rows,", "(MB)', 'TIMEOUT (SEC)'])) @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the dependencies to be installed", "be used when the function is ran. 
The Layers ARN's (a maximum of", "paths, requirements, memory, timeout, layers): \"\"\"Create a new function\"\"\" click.echo('=== Creating lambdapool function", "the function in seconds\") @click.option('--layers', help=\"Sets the layers to be used when the", "the memory size of the function environment\") @click.option('--timeout', type=click.INT, help=\"Sets the timeout for", "LambdaPoolFunction( function_name=function_name, paths=paths, requirements=requirements, memory=memory, timeout=timeout, layers=layers.split(',') if layers else [] ) if", "Layers ARN's (a maximum of 5) should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1)", "5) should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1) def update(function_name, paths, requirements, memory,", "click.echo(f'=== Succesfully created lambdapool function {function_name} ===') @cli.command() def list(): \"\"\"List all deployed", "as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Updated lambdapool function {function_name} ===') @cli.command() @click.argument('function_name',", "click.echo(f'=== Updated lambdapool function {function_name} ===') @cli.command() @click.argument('function_name', nargs=1) def delete(function_name): \"\"\"Delete a", "NAME', 'SIZE', 'WHEN', 'RUNTIME MEMORY (MB)', 'TIMEOUT (SEC)'])) @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies", "should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1) def update(function_name, paths, requirements, memory, timeout,", "\"\"\"List all deployed functions\"\"\" funcs = LambdaPoolFunction.list() funcs = sorted(funcs, key=lambda x: x['last_updated'],", "be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1, type=click.Path(exists=True)) def create(function_name, paths, requirements, memory, timeout,", "list(): \"\"\"List all 
deployed functions\"\"\" funcs = LambdaPoolFunction.list() funcs = sorted(funcs, key=lambda x:", "click.echo(tabulate(rows, headers=['FUNCTION NAME', 'SIZE', 'WHEN', 'RUNTIME MEMORY (MB)', 'TIMEOUT (SEC)'])) @cli.command() @click.option('--requirements', '-r',", "def create(function_name, paths, requirements, memory, timeout, layers): \"\"\"Create a new function\"\"\" click.echo('=== Creating", "@click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1) def update(function_name, paths, requirements, memory, timeout, layers): \"\"\"Update an", "nargs=-1, type=click.Path(exists=True)) def create(function_name, paths, requirements, memory, timeout, layers): \"\"\"Create a new function\"\"\"", "'TIMEOUT (SEC)'])) @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the dependencies to be installed along", "all deployed functions\"\"\" funcs = LambdaPoolFunction.list() funcs = sorted(funcs, key=lambda x: x['last_updated'], reverse=True)", "rows.append( [ func['function_name'], utils.convert_size(func['size']), utils.datestr(func['last_updated']), func['memory'], func['timeout'] ] ) click.echo(tabulate(rows, headers=['FUNCTION NAME', 'SIZE',", "{function_name} ===') @cli.command() @click.argument('function_name', nargs=1) def delete(function_name): \"\"\"Delete a function\"\"\" click.echo('=== Deleting lambdapool", "to be installed along with the function\") @click.option('--memory', type=click.INT, help=\"Sets the memory size", "function {function_name} already exists') sys.exit(1) func.create() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1)", "nargs=-1) def update(function_name, paths, requirements, memory, timeout, layers): \"\"\"Update an existing function\"\"\" click.echo('===", "def delete(function_name): \"\"\"Delete a function\"\"\" click.echo('=== Deleting lambdapool function ===') func = LambdaPoolFunction(function_name=function_name)", ") if func.exists(): 
click.echo(f'lambdapool function {function_name} already exists') sys.exit(1) func.create() except exceptions.LambdaFunctionError as", "maximum of 5) should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1) def update(function_name, paths,", "the function is ran. The Layers ARN's (a maximum of 5) should be", "created lambdapool function {function_name} ===') @cli.command() def list(): \"\"\"List all deployed functions\"\"\" funcs", "import click from .function import LambdaPoolFunction from . import utils from tabulate import", "function environment\") @click.option('--timeout', type=click.INT, help=\"Sets the timeout for the function in seconds\") @click.option('--layers',", "import utils from tabulate import tabulate from lambdapool import exceptions @click.group() def cli():", "type=click.Path(exists=True)) def create(function_name, paths, requirements, memory, timeout, layers): \"\"\"Create a new function\"\"\" click.echo('===", "exceptions @click.group() def cli(): pass @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the dependencies to", "func.create() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Succesfully created lambdapool function", "utils from tabulate import tabulate from lambdapool import exceptions @click.group() def cli(): pass", "def update(function_name, paths, requirements, memory, timeout, layers): \"\"\"Update an existing function\"\"\" click.echo('=== Updating", "timeout=timeout, layers=layers.split(',') if layers else [] ) func.update() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR:", "function in seconds\") @click.option('--layers', help=\"Sets the layers to be used when the function", "layers else [] ) if func.exists(): click.echo(f'lambdapool function {function_name} already exists') sys.exit(1) func.create()", "new function\"\"\" click.echo('=== Creating lambdapool function 
===') try: func = LambdaPoolFunction( function_name=function_name, paths=paths,", "x['last_updated'], reverse=True) rows = [] for func in funcs: rows.append( [ func['function_name'], utils.convert_size(func['size']),", "with the function\") @click.option('--memory', type=click.INT, help=\"Sets the memory size of the function environment\")", "funcs = sorted(funcs, key=lambda x: x['last_updated'], reverse=True) rows = [] for func in", "memory, timeout, layers): \"\"\"Create a new function\"\"\" click.echo('=== Creating lambdapool function ===') try:", "Creating lambdapool function ===') try: func = LambdaPoolFunction( function_name=function_name, paths=paths, requirements=requirements, memory=memory, timeout=timeout,", "is ran. The Layers ARN's (a maximum of 5) should be specified.\") @click.argument('function_name',", "a new function\"\"\" click.echo('=== Creating lambdapool function ===') try: func = LambdaPoolFunction( function_name=function_name,", "deployed functions\"\"\" funcs = LambdaPoolFunction.list() funcs = sorted(funcs, key=lambda x: x['last_updated'], reverse=True) rows", "@cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the dependencies to be installed along with the", "funcs: rows.append( [ func['function_name'], utils.convert_size(func['size']), utils.datestr(func['last_updated']), func['memory'], func['timeout'] ] ) click.echo(tabulate(rows, headers=['FUNCTION NAME',", "5) should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1, type=click.Path(exists=True)) def create(function_name, paths, requirements,", "sys.exit(1) click.echo(f'=== Succesfully created lambdapool function {function_name} ===') @cli.command() def list(): \"\"\"List all", "sys import click from .function import LambdaPoolFunction from . 
import utils from tabulate", "[] for func in funcs: rows.append( [ func['function_name'], utils.convert_size(func['size']), utils.datestr(func['last_updated']), func['memory'], func['timeout'] ]", "help=\"Specifies the dependencies to be installed along with the function\") @click.option('--memory', type=click.INT, help=\"Sets", "requirements=requirements, memory=memory, timeout=timeout, layers=layers.split(',') if layers else [] ) if func.exists(): click.echo(f'lambdapool function", "except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Succesfully created lambdapool function {function_name}", "key=lambda x: x['last_updated'], reverse=True) rows = [] for func in funcs: rows.append( [", "of 5) should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1) def update(function_name, paths, requirements,", "type=click.Path(exists=True), help=\"Specifies the dependencies to be installed along with the function\") @click.option('--memory', type=click.INT,", "the timeout for the function in seconds\") @click.option('--layers', help=\"Sets the layers to be", "@click.argument('paths', nargs=-1) def update(function_name, paths, requirements, memory, timeout, layers): \"\"\"Update an existing function\"\"\"", "memory=memory, timeout=timeout, layers=layers.split(',') if layers else [] ) func.update() except exceptions.LambdaFunctionError as e:", "'-r', type=click.Path(exists=True), help=\"Specifies the dependencies to be installed along with the function\") @click.option('--memory',", "environment\") @click.option('--timeout', type=click.INT, help=\"Sets the timeout for the function in seconds\") @click.option('--layers', help=\"Sets", "to be used when the function is ran. 
The Layers ARN's (a maximum", "The Layers ARN's (a maximum of 5) should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths',", "function\"\"\" click.echo('=== Creating lambdapool function ===') try: func = LambdaPoolFunction( function_name=function_name, paths=paths, requirements=requirements,", "\"\"\"Update an existing function\"\"\" click.echo('=== Updating lambdapool function ===') try: func = LambdaPoolFunction(", "===') @cli.command() @click.argument('function_name', nargs=1) def delete(function_name): \"\"\"Delete a function\"\"\" click.echo('=== Deleting lambdapool function", "Layers ARN's (a maximum of 5) should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1,", "function\"\"\" click.echo('=== Updating lambdapool function ===') try: func = LambdaPoolFunction( function_name=function_name, paths=paths, requirements=requirements,", "e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Updated lambdapool function {function_name} ===') @cli.command() @click.argument('function_name', nargs=1)", "@click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the dependencies to be installed along with the function\")", "if layers else [] ) func.update() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1)", "memory, timeout, layers): \"\"\"Update an existing function\"\"\" click.echo('=== Updating lambdapool function ===') try:", "import sys import click from .function import LambdaPoolFunction from . 
import utils from", "\"\"\"Create a new function\"\"\" click.echo('=== Creating lambdapool function ===') try: func = LambdaPoolFunction(", "funcs = LambdaPoolFunction.list() funcs = sorted(funcs, key=lambda x: x['last_updated'], reverse=True) rows = []", "Updating lambdapool function ===') try: func = LambdaPoolFunction( function_name=function_name, paths=paths, requirements=requirements, memory=memory, timeout=timeout,", "function\") @click.option('--memory', type=click.INT, help=\"Sets the memory size of the function environment\") @click.option('--timeout', type=click.INT,", "(SEC)'])) @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the dependencies to be installed along with", "layers=layers.split(',') if layers else [] ) if func.exists(): click.echo(f'lambdapool function {function_name} already exists')", "function_name=function_name, paths=paths, requirements=requirements, memory=memory, timeout=timeout, layers=layers.split(',') if layers else [] ) if func.exists():", "memory size of the function environment\") @click.option('--timeout', type=click.INT, help=\"Sets the timeout for the", "memory=memory, timeout=timeout, layers=layers.split(',') if layers else [] ) if func.exists(): click.echo(f'lambdapool function {function_name}", "click.echo('=== Deleting lambdapool function ===') func = LambdaPoolFunction(function_name=function_name) func.delete() click.echo(f'=== Deleted lambdapool function", "should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1, type=click.Path(exists=True)) def create(function_name, paths, requirements, memory,", "func.exists(): click.echo(f'lambdapool function {function_name} already exists') sys.exit(1) func.create() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR:", "functions\"\"\" funcs = LambdaPoolFunction.list() funcs = sorted(funcs, key=lambda x: x['last_updated'], reverse=True) rows =", "] ) 
click.echo(tabulate(rows, headers=['FUNCTION NAME', 'SIZE', 'WHEN', 'RUNTIME MEMORY (MB)', 'TIMEOUT (SEC)'])) @cli.command()", "else [] ) func.update() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Updated", "nargs=1) @click.argument('paths', nargs=-1, type=click.Path(exists=True)) def create(function_name, paths, requirements, memory, timeout, layers): \"\"\"Create a", "{e}') sys.exit(1) click.echo(f'=== Updated lambdapool function {function_name} ===') @cli.command() @click.argument('function_name', nargs=1) def delete(function_name):", "along with the function\") @click.option('--memory', type=click.INT, help=\"Sets the memory size of the function", "{e}') sys.exit(1) click.echo(f'=== Succesfully created lambdapool function {function_name} ===') @cli.command() def list(): \"\"\"List", "layers): \"\"\"Update an existing function\"\"\" click.echo('=== Updating lambdapool function ===') try: func =", "lambdapool function {function_name} ===') @cli.command() @click.argument('function_name', nargs=1) def delete(function_name): \"\"\"Delete a function\"\"\" click.echo('===", "@cli.command() def list(): \"\"\"List all deployed functions\"\"\" funcs = LambdaPoolFunction.list() funcs = sorted(funcs,", "import tabulate from lambdapool import exceptions @click.group() def cli(): pass @cli.command() @click.option('--requirements', '-r',", "create(function_name, paths, requirements, memory, timeout, layers): \"\"\"Create a new function\"\"\" click.echo('=== Creating lambdapool", "e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Succesfully created lambdapool function {function_name} ===') @cli.command() def", "sorted(funcs, key=lambda x: x['last_updated'], reverse=True) rows = [] for func in funcs: rows.append(", "===') @cli.command() def list(): \"\"\"List all deployed functions\"\"\" funcs = LambdaPoolFunction.list() funcs =", "specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', 
nargs=-1) def update(function_name, paths, requirements, memory, timeout, layers): \"\"\"Update", "{function_name} ===') @cli.command() def list(): \"\"\"List all deployed functions\"\"\" funcs = LambdaPoolFunction.list() funcs", "cli(): pass @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the dependencies to be installed along", "===') try: func = LambdaPoolFunction( function_name=function_name, paths=paths, requirements=requirements, memory=memory, timeout=timeout, layers=layers.split(',') if layers", "function {function_name} ===') @cli.command() def list(): \"\"\"List all deployed functions\"\"\" funcs = LambdaPoolFunction.list()", "sys.exit(1) click.echo(f'=== Updated lambdapool function {function_name} ===') @cli.command() @click.argument('function_name', nargs=1) def delete(function_name): \"\"\"Delete", "nargs=1) def delete(function_name): \"\"\"Delete a function\"\"\" click.echo('=== Deleting lambdapool function ===') func =", "function\"\"\" click.echo('=== Deleting lambdapool function ===') func = LambdaPoolFunction(function_name=function_name) func.delete() click.echo(f'=== Deleted lambdapool", "requirements, memory, timeout, layers): \"\"\"Create a new function\"\"\" click.echo('=== Creating lambdapool function ===')", ". 
import utils from tabulate import tabulate from lambdapool import exceptions @click.group() def", "type=click.INT, help=\"Sets the timeout for the function in seconds\") @click.option('--layers', help=\"Sets the layers", "delete(function_name): \"\"\"Delete a function\"\"\" click.echo('=== Deleting lambdapool function ===') func = LambdaPoolFunction(function_name=function_name) func.delete()", "func.update() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Updated lambdapool function {function_name}", "ARN's (a maximum of 5) should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1) def", "tabulate import tabulate from lambdapool import exceptions @click.group() def cli(): pass @cli.command() @click.option('--requirements',", "specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1, type=click.Path(exists=True)) def create(function_name, paths, requirements, memory, timeout, layers):", "@click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1, type=click.Path(exists=True)) def create(function_name, paths, requirements, memory, timeout, layers): \"\"\"Create", "tabulate from lambdapool import exceptions @click.group() def cli(): pass @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True),", "except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Updated lambdapool function {function_name} ===')", "function ===') try: func = LambdaPoolFunction( function_name=function_name, paths=paths, requirements=requirements, memory=memory, timeout=timeout, layers=layers.split(',') if", "'RUNTIME MEMORY (MB)', 'TIMEOUT (SEC)'])) @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the dependencies to", "sys.exit(1) func.create() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1) 
click.echo(f'=== Succesfully created lambdapool", "def cli(): pass @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the dependencies to be installed", "'WHEN', 'RUNTIME MEMORY (MB)', 'TIMEOUT (SEC)'])) @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the dependencies", "for the function in seconds\") @click.option('--layers', help=\"Sets the layers to be used when", "the function environment\") @click.option('--timeout', type=click.INT, help=\"Sets the timeout for the function in seconds\")", "timeout, layers): \"\"\"Update an existing function\"\"\" click.echo('=== Updating lambdapool function ===') try: func", "paths=paths, requirements=requirements, memory=memory, timeout=timeout, layers=layers.split(',') if layers else [] ) if func.exists(): click.echo(f'lambdapool", "try: func = LambdaPoolFunction( function_name=function_name, paths=paths, requirements=requirements, memory=memory, timeout=timeout, layers=layers.split(',') if layers else", "requirements=requirements, memory=memory, timeout=timeout, layers=layers.split(',') if layers else [] ) func.update() except exceptions.LambdaFunctionError as", "help=\"Sets the timeout for the function in seconds\") @click.option('--layers', help=\"Sets the layers to", "rows = [] for func in funcs: rows.append( [ func['function_name'], utils.convert_size(func['size']), utils.datestr(func['last_updated']), func['memory'],", "layers=layers.split(',') if layers else [] ) func.update() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}')", "in seconds\") @click.option('--layers', help=\"Sets the layers to be used when the function is", "function_name=function_name, paths=paths, requirements=requirements, memory=memory, timeout=timeout, layers=layers.split(',') if layers else [] ) func.update() except", "nargs=1) @click.argument('paths', nargs=-1) def update(function_name, paths, requirements, memory, timeout, 
layers): \"\"\"Update an existing", "already exists') sys.exit(1) func.create() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Succesfully", "of 5) should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1, type=click.Path(exists=True)) def create(function_name, paths,", "Succesfully created lambdapool function {function_name} ===') @cli.command() def list(): \"\"\"List all deployed functions\"\"\"", "update(function_name, paths, requirements, memory, timeout, layers): \"\"\"Update an existing function\"\"\" click.echo('=== Updating lambdapool", "from .function import LambdaPoolFunction from . import utils from tabulate import tabulate from", "the dependencies to be installed along with the function\") @click.option('--memory', type=click.INT, help=\"Sets the", "click from .function import LambdaPoolFunction from . import utils from tabulate import tabulate", "from lambdapool import exceptions @click.group() def cli(): pass @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies", "utils.convert_size(func['size']), utils.datestr(func['last_updated']), func['memory'], func['timeout'] ] ) click.echo(tabulate(rows, headers=['FUNCTION NAME', 'SIZE', 'WHEN', 'RUNTIME MEMORY", "@click.option('--layers', help=\"Sets the layers to be used when the function is ran. 
The", "utils.datestr(func['last_updated']), func['memory'], func['timeout'] ] ) click.echo(tabulate(rows, headers=['FUNCTION NAME', 'SIZE', 'WHEN', 'RUNTIME MEMORY (MB)',", "layers else [] ) func.update() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'===", "Updated lambdapool function {function_name} ===') @cli.command() @click.argument('function_name', nargs=1) def delete(function_name): \"\"\"Delete a function\"\"\"", "in funcs: rows.append( [ func['function_name'], utils.convert_size(func['size']), utils.datestr(func['last_updated']), func['memory'], func['timeout'] ] ) click.echo(tabulate(rows, headers=['FUNCTION", "@cli.command() @click.argument('function_name', nargs=1) def delete(function_name): \"\"\"Delete a function\"\"\" click.echo('=== Deleting lambdapool function ===')", "lambdapool function ===') try: func = LambdaPoolFunction( function_name=function_name, paths=paths, requirements=requirements, memory=memory, timeout=timeout, layers=layers.split(',')", "layers to be used when the function is ran. 
The Layers ARN's (a", "a function\"\"\" click.echo('=== Deleting lambdapool function ===') func = LambdaPoolFunction(function_name=function_name) func.delete() click.echo(f'=== Deleted", "= LambdaPoolFunction( function_name=function_name, paths=paths, requirements=requirements, memory=memory, timeout=timeout, layers=layers.split(',') if layers else [] )", "if layers else [] ) if func.exists(): click.echo(f'lambdapool function {function_name} already exists') sys.exit(1)", "[] ) if func.exists(): click.echo(f'lambdapool function {function_name} already exists') sys.exit(1) func.create() except exceptions.LambdaFunctionError", "{function_name} already exists') sys.exit(1) func.create() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'===", "click.echo('=== Updating lambdapool function ===') try: func = LambdaPoolFunction( function_name=function_name, paths=paths, requirements=requirements, memory=memory,", ".function import LambdaPoolFunction from . 
import utils from tabulate import tabulate from lambdapool", "@click.argument('paths', nargs=-1, type=click.Path(exists=True)) def create(function_name, paths, requirements, memory, timeout, layers): \"\"\"Create a new", "click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Succesfully created lambdapool function {function_name} ===') @cli.command() def list():", "LambdaPoolFunction( function_name=function_name, paths=paths, requirements=requirements, memory=memory, timeout=timeout, layers=layers.split(',') if layers else [] ) func.update()", "headers=['FUNCTION NAME', 'SIZE', 'WHEN', 'RUNTIME MEMORY (MB)', 'TIMEOUT (SEC)'])) @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True),", "be installed along with the function\") @click.option('--memory', type=click.INT, help=\"Sets the memory size of", "maximum of 5) should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1, type=click.Path(exists=True)) def create(function_name,", "layers): \"\"\"Create a new function\"\"\" click.echo('=== Creating lambdapool function ===') try: func =", "click.echo('=== Creating lambdapool function ===') try: func = LambdaPoolFunction( function_name=function_name, paths=paths, requirements=requirements, memory=memory,", ") click.echo(tabulate(rows, headers=['FUNCTION NAME', 'SIZE', 'WHEN', 'RUNTIME MEMORY (MB)', 'TIMEOUT (SEC)'])) @cli.command() @click.option('--requirements',", "@click.argument('function_name', nargs=1) def delete(function_name): \"\"\"Delete a function\"\"\" click.echo('=== Deleting lambdapool function ===') func", "if func.exists(): click.echo(f'lambdapool function {function_name} already exists') sys.exit(1) func.create() except exceptions.LambdaFunctionError as e:", "paths=paths, requirements=requirements, memory=memory, timeout=timeout, layers=layers.split(',') if layers else [] ) func.update() except exceptions.LambdaFunctionError", "lambdapool function {function_name} ===') @cli.command() 
def list(): \"\"\"List all deployed functions\"\"\" funcs =", "paths, requirements, memory, timeout, layers): \"\"\"Update an existing function\"\"\" click.echo('=== Updating lambdapool function", "click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Updated lambdapool function {function_name} ===') @cli.command() @click.argument('function_name', nargs=1) def", "exists') sys.exit(1) func.create() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Succesfully created", "as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Succesfully created lambdapool function {function_name} ===') @cli.command()", "Deleting lambdapool function ===') func = LambdaPoolFunction(function_name=function_name) func.delete() click.echo(f'=== Deleted lambdapool function {function_name}===')", "existing function\"\"\" click.echo('=== Updating lambdapool function ===') try: func = LambdaPoolFunction( function_name=function_name, paths=paths,", "size of the function environment\") @click.option('--timeout', type=click.INT, help=\"Sets the timeout for the function", "MEMORY (MB)', 'TIMEOUT (SEC)'])) @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the dependencies to be", "= sorted(funcs, key=lambda x: x['last_updated'], reverse=True) rows = [] for func in funcs:", "[] ) func.update() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Updated lambdapool", "from tabulate import tabulate from lambdapool import exceptions @click.group() def cli(): pass @cli.command()", "func['timeout'] ] ) click.echo(tabulate(rows, headers=['FUNCTION NAME', 'SIZE', 'WHEN', 'RUNTIME MEMORY (MB)', 'TIMEOUT (SEC)']))", "else [] ) if func.exists(): click.echo(f'lambdapool function {function_name} already exists') sys.exit(1) func.create() except", "ARN's (a maximum of 5) should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', 
nargs=-1, type=click.Path(exists=True))", "ran. The Layers ARN's (a maximum of 5) should be specified.\") @click.argument('function_name', nargs=1)", "the function\") @click.option('--memory', type=click.INT, help=\"Sets the memory size of the function environment\") @click.option('--timeout',", "timeout for the function in seconds\") @click.option('--layers', help=\"Sets the layers to be used", "function is ran. The Layers ARN's (a maximum of 5) should be specified.\")", "x: x['last_updated'], reverse=True) rows = [] for func in funcs: rows.append( [ func['function_name'],", "from . import utils from tabulate import tabulate from lambdapool import exceptions @click.group()", "when the function is ran. The Layers ARN's (a maximum of 5) should", "(a maximum of 5) should be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1) def update(function_name,", "seconds\") @click.option('--layers', help=\"Sets the layers to be used when the function is ran.", "import LambdaPoolFunction from . 
import utils from tabulate import tabulate from lambdapool import", "be specified.\") @click.argument('function_name', nargs=1) @click.argument('paths', nargs=-1) def update(function_name, paths, requirements, memory, timeout, layers):", "func = LambdaPoolFunction( function_name=function_name, paths=paths, requirements=requirements, memory=memory, timeout=timeout, layers=layers.split(',') if layers else []", "an existing function\"\"\" click.echo('=== Updating lambdapool function ===') try: func = LambdaPoolFunction( function_name=function_name,", "@click.group() def cli(): pass @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the dependencies to be", "@click.option('--memory', type=click.INT, help=\"Sets the memory size of the function environment\") @click.option('--timeout', type=click.INT, help=\"Sets", "help=\"Sets the memory size of the function environment\") @click.option('--timeout', type=click.INT, help=\"Sets the timeout", "[ func['function_name'], utils.convert_size(func['size']), utils.datestr(func['last_updated']), func['memory'], func['timeout'] ] ) click.echo(tabulate(rows, headers=['FUNCTION NAME', 'SIZE', 'WHEN',", "timeout, layers): \"\"\"Create a new function\"\"\" click.echo('=== Creating lambdapool function ===') try: func", "click.echo(f'lambdapool function {function_name} already exists') sys.exit(1) func.create() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}')", "the layers to be used when the function is ran. 
The Layers ARN's", "for func in funcs: rows.append( [ func['function_name'], utils.convert_size(func['size']), utils.datestr(func['last_updated']), func['memory'], func['timeout'] ] )", "LambdaPoolFunction.list() funcs = sorted(funcs, key=lambda x: x['last_updated'], reverse=True) rows = [] for func", "func['function_name'], utils.convert_size(func['size']), utils.datestr(func['last_updated']), func['memory'], func['timeout'] ] ) click.echo(tabulate(rows, headers=['FUNCTION NAME', 'SIZE', 'WHEN', 'RUNTIME", "exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Updated lambdapool function {function_name} ===') @cli.command()", "LambdaPoolFunction from . import utils from tabulate import tabulate from lambdapool import exceptions", "import exceptions @click.group() def cli(): pass @cli.command() @click.option('--requirements', '-r', type=click.Path(exists=True), help=\"Specifies the dependencies", "help=\"Sets the layers to be used when the function is ran. The Layers", "used when the function is ran. The Layers ARN's (a maximum of 5)", "installed along with the function\") @click.option('--memory', type=click.INT, help=\"Sets the memory size of the", ") func.update() except exceptions.LambdaFunctionError as e: click.echo(f'ERROR: {e}') sys.exit(1) click.echo(f'=== Updated lambdapool function" ]
[ "table = [[ 0 for _ in range(0, C+1)] for _ in range(0,", "# capacity val = [ int(v) for v in input().strip().split(\" \")] wt =", "c in range(1, C+1): if c - wt[i-1] < 0: table[i][c] = table[i-1][c]", "= max(table[i-1][c], table[i-1][c-wt[i-1]]+val[i-1]) return table[N][C] N = int(input().strip()) W = int(input().strip()) # capacity", "for i in range(1, N+1): for c in range(1, C+1): if c -", "N = int(input().strip()) W = int(input().strip()) # capacity val = [ int(v) for", "= int(input().strip()) # capacity val = [ int(v) for v in input().strip().split(\" \")]", "input().strip().split(\" \")] wt = [ int(w) for w in input().strip().split(\" \")] print(knapsack(val, wt,", "\")] wt = [ int(w) for w in input().strip().split(\" \")] print(knapsack(val, wt, N,", "in range(1, N+1): for c in range(1, C+1): if c - wt[i-1] <", "N+1): for c in range(1, C+1): if c - wt[i-1] < 0: table[i][c]", "table[0][0] = 0 for i in range(1, N+1): for c in range(1, C+1):", "0: table[i][c] = table[i-1][c] else: table[i][c] = max(table[i-1][c], table[i-1][c-wt[i-1]]+val[i-1]) return table[N][C] N =", "table[i-1][c] else: table[i][c] = max(table[i-1][c], table[i-1][c-wt[i-1]]+val[i-1]) return table[N][C] N = int(input().strip()) W =", "= int(input().strip()) W = int(input().strip()) # capacity val = [ int(v) for v", "in input().strip().split(\" \")] wt = [ int(w) for w in input().strip().split(\" \")] print(knapsack(val,", "- wt[i-1] < 0: table[i][c] = table[i-1][c] else: table[i][c] = max(table[i-1][c], table[i-1][c-wt[i-1]]+val[i-1]) return", "= [ int(v) for v in input().strip().split(\" \")] wt = [ int(w) for", "wt, N, C): table = [[ 0 for _ in range(0, C+1)] for", "wt = [ int(w) for w in input().strip().split(\" \")] print(knapsack(val, wt, N, W))", "if c - wt[i-1] < 0: table[i][c] = table[i-1][c] else: table[i][c] = max(table[i-1][c],", "c - wt[i-1] < 0: table[i][c] = table[i-1][c] else: table[i][c] = max(table[i-1][c], table[i-1][c-wt[i-1]]+val[i-1])", "def knapsack(val, wt, N, C): 
table = [[ 0 for _ in range(0,", "0 for _ in range(0, C+1)] for _ in range(0, N+1)] table[0][0] =", "v in input().strip().split(\" \")] wt = [ int(w) for w in input().strip().split(\" \")]", "range(1, N+1): for c in range(1, C+1): if c - wt[i-1] < 0:", "= table[i-1][c] else: table[i][c] = max(table[i-1][c], table[i-1][c-wt[i-1]]+val[i-1]) return table[N][C] N = int(input().strip()) W", "capacity val = [ int(v) for v in input().strip().split(\" \")] wt = [", "< 0: table[i][c] = table[i-1][c] else: table[i][c] = max(table[i-1][c], table[i-1][c-wt[i-1]]+val[i-1]) return table[N][C] N", "return table[N][C] N = int(input().strip()) W = int(input().strip()) # capacity val = [", "int(input().strip()) W = int(input().strip()) # capacity val = [ int(v) for v in", "range(0, N+1)] table[0][0] = 0 for i in range(1, N+1): for c in", "table[i][c] = max(table[i-1][c], table[i-1][c-wt[i-1]]+val[i-1]) return table[N][C] N = int(input().strip()) W = int(input().strip()) #", "C): table = [[ 0 for _ in range(0, C+1)] for _ in", "in range(1, C+1): if c - wt[i-1] < 0: table[i][c] = table[i-1][c] else:", "for _ in range(0, C+1)] for _ in range(0, N+1)] table[0][0] = 0", "int(v) for v in input().strip().split(\" \")] wt = [ int(w) for w in", "in range(0, C+1)] for _ in range(0, N+1)] table[0][0] = 0 for i", "0 for i in range(1, N+1): for c in range(1, C+1): if c", "N+1)] table[0][0] = 0 for i in range(1, N+1): for c in range(1,", "wt[i-1] < 0: table[i][c] = table[i-1][c] else: table[i][c] = max(table[i-1][c], table[i-1][c-wt[i-1]]+val[i-1]) return table[N][C]", "C+1): if c - wt[i-1] < 0: table[i][c] = table[i-1][c] else: table[i][c] =", "max(table[i-1][c], table[i-1][c-wt[i-1]]+val[i-1]) return table[N][C] N = int(input().strip()) W = int(input().strip()) # capacity val", "_ in range(0, C+1)] for _ in range(0, N+1)] table[0][0] = 0 for", "knapsack(val, wt, N, C): table = [[ 0 for _ in range(0, C+1)]", "for _ in range(0, N+1)] table[0][0] = 0 for i in range(1, N+1):", "for c in range(1, 
C+1): if c - wt[i-1] < 0: table[i][c] =", "_ in range(0, N+1)] table[0][0] = 0 for i in range(1, N+1): for", "val = [ int(v) for v in input().strip().split(\" \")] wt = [ int(w)", "= 0 for i in range(1, N+1): for c in range(1, C+1): if", "else: table[i][c] = max(table[i-1][c], table[i-1][c-wt[i-1]]+val[i-1]) return table[N][C] N = int(input().strip()) W = int(input().strip())", "#0/1 Knapsack problem def knapsack(val, wt, N, C): table = [[ 0 for", "W = int(input().strip()) # capacity val = [ int(v) for v in input().strip().split(\"", "[ int(v) for v in input().strip().split(\" \")] wt = [ int(w) for w", "[[ 0 for _ in range(0, C+1)] for _ in range(0, N+1)] table[0][0]", "i in range(1, N+1): for c in range(1, C+1): if c - wt[i-1]", "Knapsack problem def knapsack(val, wt, N, C): table = [[ 0 for _", "C+1)] for _ in range(0, N+1)] table[0][0] = 0 for i in range(1,", "N, C): table = [[ 0 for _ in range(0, C+1)] for _", "in range(0, N+1)] table[0][0] = 0 for i in range(1, N+1): for c", "table[i-1][c-wt[i-1]]+val[i-1]) return table[N][C] N = int(input().strip()) W = int(input().strip()) # capacity val =", "table[i][c] = table[i-1][c] else: table[i][c] = max(table[i-1][c], table[i-1][c-wt[i-1]]+val[i-1]) return table[N][C] N = int(input().strip())", "table[N][C] N = int(input().strip()) W = int(input().strip()) # capacity val = [ int(v)", "for v in input().strip().split(\" \")] wt = [ int(w) for w in input().strip().split(\"", "problem def knapsack(val, wt, N, C): table = [[ 0 for _ in", "= [[ 0 for _ in range(0, C+1)] for _ in range(0, N+1)]", "int(input().strip()) # capacity val = [ int(v) for v in input().strip().split(\" \")] wt", "range(0, C+1)] for _ in range(0, N+1)] table[0][0] = 0 for i in", "range(1, C+1): if c - wt[i-1] < 0: table[i][c] = table[i-1][c] else: table[i][c]", "<reponame>arslantalib3/algo_ds_101<filename>Algorithms/Dynamic_Programming/0-1_Knapsack_Problem/knapsack_problem_0_1.py #0/1 Knapsack problem def knapsack(val, wt, N, C): table = [[ 0" ]
[ "next time.\\n\\n\\n\") else: play_again() def play_game(): item = [] option = random.choice([\"pirate\", \"fairy\",", "behind a \" \"rock.\") print_pause(\"\\nYou have found the magical Sword of Ogoroth!\") print_pause(\"\\nYou", "Ogoroth shines brightly in \" \"your hand as you brace yourself for the", "of Ogoroth!\") print_pause(\"\\nYou discard your silly old dagger and take \" \"the sword", "have been defeated!\\n\") play_again() break if choice2 == \"2\": print_pause(\"\\nYou run back into", "field(item, option): print_pause(\"Enter 1 to knock on the door of the house.\") print_pause(\"Enter", "True: choice1 = input(\"(Please enter 1 or 2.)\\n\") if choice1 == \"1\": house(item,", "import time import random def print_pause(message_to_print): print(message_to_print) time.sleep(2) def intro(item, option): print_pause(\"You find", "option) break elif choice1 == \"2\": cave(item, option) break def play_again(): again =", "sword.\") print_pause(\"\\nThe Sword of Ogoroth shines brightly in \" \"your hand as you", "option + \"takes one look at \" \"your shiny new toy and runs", "been \" \"followed.\\n\") field(item, option) break def field(item, option): print_pause(\"Enter 1 to knock", "== \"2\": print_pause(\"\\nYou run back into the field. \" \"\\nLuckily, you don't seem", "\" is somewhere around \" \"here, and has been terrifying the nearby village.\\n\")", "yourself for the \" \"attack.\") print_pause(\"\\nBut the \" + option + \"takes one", "game ...\\n\\n\\n\") play_game() elif again == \"n\": print_pause(\"\\n\\n\\nThanks for playing! See you next", "run back into the field. 
\" \"\\nLuckily, you don't seem to have been", "break elif choice1 == \"2\": cave(item, option) break def play_again(): again = input(\"Would", "else: print_pause(\"\\nYou do your best...\") print_pause(\"but your dagger is no match for the", "in item: print_pause(\"\\nAs the \" + option + \" moves to attack, \"", "\" \"attack.\") print_pause(\"\\nBut the \" + option + \"takes one look at \"", "+ option + \"takes one look at \" \"your shiny new toy and", "= input(\"Would you like to play again? (y/n)\").lower() if again == \"y\": print_pause(\"\\n\\n\\nExcellent!", "around \" \"here, and has been terrifying the nearby village.\\n\") print_pause(\"In front of", "choice1 = input(\"(Please enter 1 or 2.)\\n\") if choice1 == \"1\": house(item, option)", "1 or 2.)\\n\") if choice1 == \"1\": house(item, option) break elif choice1 ==", "\"you unsheath your new sword.\") print_pause(\"\\nThe Sword of Ogoroth shines brightly in \"", "only having a tiny dagger.\\n\") while True: choice2 = input(\"Would you like to", "walk back to the field.\\n\") else: print_pause(\"\\nYou peer cautiously into the cave.\") print_pause(\"\\nIt", "+ option + \" is somewhere around \" \"here, and has been terrifying", "into the cave.\") print_pause(\"What would you like to do?\") while True: choice1 =", "old dagger and take \" \"the sword with you.\") print_pause(\"\\nYou walk back out", "to the field.\\n\") item.append(\"sword\") field(item, option) def house(item, option): print_pause(\"\\nYou approach the door", "\" + option + \".\") print_pause(\"\\nYou have been defeated!\\n\") play_again() break if choice2", "away!\") print_pause(\"\\nYou have rid the town of the \" + option + \".", "front of you is a house.\\n\") print_pause(\"To your right is a dark cave.\\n\")", "\".\") print_pause(\"\\nYou have been defeated!\\n\") play_again() break if choice2 == \"2\": print_pause(\"\\nYou run", "a \" + option + \".\") print_pause(\"\\nEep! 
This is the \" + option", "under-prepared for this, \" \"what with only having a tiny dagger.\\n\") while True:", "= [] option = random.choice([\"pirate\", \"fairy\", \"dragon\", \"gorgon\", \"troll\"]) intro(item, option) field(item, option)", "+ \"takes one look at \" \"your shiny new toy and runs away!\")", "as you brace yourself for the \" \"attack.\") print_pause(\"\\nBut the \" + option", "brace yourself for the \" \"attack.\") print_pause(\"\\nBut the \" + option + \"takes", "\"run away?\") if choice2 == \"1\": if \"sward\" in item: print_pause(\"\\nAs the \"", "True: choice2 = input(\"Would you like to (1) fight or (2) \" \"run", "break def field(item, option): print_pause(\"Enter 1 to knock on the door of the", "print_pause(\"\\nYou walk back to the field.\\n\") else: print_pause(\"\\nYou peer cautiously into the cave.\")", "def play_again(): again = input(\"Would you like to play again? (y/n)\").lower() if again", "(2) \" \"run away?\") if choice2 == \"1\": if \"sward\" in item: print_pause(\"\\nAs", "= input(\"Would you like to (1) fight or (2) \" \"run away?\") if", "again? (y/n)\").lower() if again == \"y\": print_pause(\"\\n\\n\\nExcellent! Restarting the game ...\\n\\n\\n\") play_game() elif", "import random def print_pause(message_to_print): print(message_to_print) time.sleep(2) def intro(item, option): print_pause(\"You find yourself standing", "print_pause(\"\\nYou have rid the town of the \" + option + \". You", "and gotten all\" \" the good stuff. It's just an empty cave\" \"", "It's just an empty cave\" \" now.\") print_pause(\"\\nYou walk back to the field.\\n\")", "and yellow wildflowers.\\n\") print_pause(\"Rumor has it that a \" + option + \"", "best...\") print_pause(\"but your dagger is no match for the \" + option +", "the field.\\n\") else: print_pause(\"\\nYou peer cautiously into the cave.\") print_pause(\"\\nIt turns out to", "play_again() break if choice2 == \"2\": print_pause(\"\\nYou run back into the field. 
\"", "have rid the town of the \" + option + \". You are", "back into the field. \" \"\\nLuckily, you don't seem to have been \"", "\"\\nLuckily, you don't seem to have been \" \"followed.\\n\") field(item, option) break def", "(but not very \" \"effective) dagger.\\n\") def cave(item, option): if \"sword\" in item:", "gotten all\" \" the good stuff. It's just an empty cave\" \" now.\")", "of metal behind a \" \"rock.\") print_pause(\"\\nYou have found the magical Sword of", "in an open field, filled \" \"with grass and yellow wildflowers.\\n\") print_pause(\"Rumor has", "\"effective) dagger.\\n\") def cave(item, option): if \"sword\" in item: print_pause(\"\\nYou peer cautiously into", "\" \"what with only having a tiny dagger.\\n\") while True: choice2 = input(\"Would", "runs away!\") print_pause(\"\\nYou have rid the town of the \" + option +", "a dark cave.\\n\") print_pause(\"In your hand you hold your trusty (but not very", "if \"sword\" in item: print_pause(\"\\nYou peer cautiously into the cave.\") print_pause(\"\\nYou've been here", "dagger.\\n\") def cave(item, option): if \"sword\" in item: print_pause(\"\\nYou peer cautiously into the", "you.\") print_pause(\"\\nYou walk back out to the field.\\n\") item.append(\"sword\") field(item, option) def house(item,", "\" \"opens and out steps a \" + option + \".\") print_pause(\"\\nEep! This", "has been terrifying the nearby village.\\n\") print_pause(\"In front of you is a house.\\n\")", "print_pause(\"\\nYou've been here before, and gotten all\" \" the good stuff. 
It's just", "cautiously into the cave.\") print_pause(\"\\nIt turns out to be only a very small", "knock when the door \" \"opens and out steps a \" + option", "are victorious!\\n\") else: print_pause(\"\\nYou do your best...\") print_pause(\"but your dagger is no match", "a \" \"rock.\") print_pause(\"\\nYou have found the magical Sword of Ogoroth!\") print_pause(\"\\nYou discard", "grass and yellow wildflowers.\\n\") print_pause(\"Rumor has it that a \" + option +", "like to play again? (y/n)\").lower() if again == \"y\": print_pause(\"\\n\\n\\nExcellent! Restarting the game", "when the door \" \"opens and out steps a \" + option +", "enter 1 or 2.)\\n\") if choice1 == \"1\": house(item, option) break elif choice1", "option): print_pause(\"You find yourself standing in an open field, filled \" \"with grass", "option + \" attacks you!\\n\") if \"sword\" not in item: print_pause(\"You feel a", "print_pause(\"\\nYou peer cautiously into the cave.\") print_pause(\"\\nYou've been here before, and gotten all\"", "brightly in \" \"your hand as you brace yourself for the \" \"attack.\")", "of the \" + option + \". You are victorious!\\n\") else: print_pause(\"\\nYou do", "while True: choice1 = input(\"(Please enter 1 or 2.)\\n\") if choice1 == \"1\":", "in item: print_pause(\"You feel a bit under-prepared for this, \" \"what with only", "\"the sword with you.\") print_pause(\"\\nYou walk back out to the field.\\n\") item.append(\"sword\") field(item,", "the door of the house.\") print_pause(\"Enter 2 to peer into the cave.\") print_pause(\"What", "find yourself standing in an open field, filled \" \"with grass and yellow", "again == \"n\": print_pause(\"\\n\\n\\nThanks for playing! See you next time.\\n\\n\\n\") else: play_again() def", "choice2 == \"2\": print_pause(\"\\nYou run back into the field. 
\" \"\\nLuckily, you don't", "house.\") print_pause(\"\\nYou are about to knock when the door \" \"opens and out", "+ option + \" moves to attack, \" \"you unsheath your new sword.\")", "\"2\": cave(item, option) break def play_again(): again = input(\"Would you like to play", "door \" \"opens and out steps a \" + option + \".\") print_pause(\"\\nEep!", "peer cautiously into the cave.\") print_pause(\"\\nIt turns out to be only a very", "somewhere around \" \"here, and has been terrifying the nearby village.\\n\") print_pause(\"In front", "print_pause(\"\\nThe \" + option + \" attacks you!\\n\") if \"sword\" not in item:", "the \" + option + \" moves to attack, \" \"you unsheath your", "item.append(\"sword\") field(item, option) def house(item, option): print_pause(\"\\nYou approach the door of the house.\")", "2.)\\n\") if choice1 == \"1\": house(item, option) break elif choice1 == \"2\": cave(item,", "print(message_to_print) time.sleep(2) def intro(item, option): print_pause(\"You find yourself standing in an open field,", "the \" + option + \". 
def intro(item, option):
    """Set the scene: describe the field, the monster rumor, and the
    player's starting equipment. `item` (inventory) is unused here but
    kept for a signature consistent with the other scene functions."""
    scene_lines = [
        "You find yourself standing in an open field, filled "
        "with grass and yellow wildflowers.\n",
        "Rumor has it that a " + option + " is somewhere around "
        "here, and has been terrifying the nearby village.\n",
        "In front of you is a house.\n",
        "To your right is a dark cave.\n",
        "In your hand you hold your trusty (but not very "
        "effective) dagger.\n",
    ]
    for line in scene_lines:
        print_pause(line)
def cave(item, option):
    """Explore the cave: grant the sword on the first visit, then return
    the player to the field either way."""
    print_pause("\nYou peer cautiously into the cave.")
    if "sword" in item:
        # Repeat visit: nothing left to find.
        print_pause("\nYou've been here before, and gotten all"
                    " the good stuff. It's just an empty cave"
                    " now.")
        print_pause("\nYou walk back to the field.\n")
    else:
        # First visit: the player discovers and takes the sword.
        print_pause("\nIt turns out to be only a very small cave.")
        print_pause("\nYour eye catches a glint of metal behind a "
                    "rock.")
        print_pause("\nYou have found the magical Sword of Ogoroth!")
        print_pause("\nYou discard your silly old dagger and take "
                    "the sword with you.")
        print_pause("\nYou walk back out to the field.\n")
        item.append("sword")
    field(item, option)
def house(item, option):
    """Confront the monster at the house.

    Fighting with the sword wins the game; fighting with only the dagger
    loses; running away returns the player to the field.

    Bug fixes vs. original:
    - the inventory check read `"sward"` (typo), so a player holding the
      sword still lost the fight; corrected to `"sword"`.
    - the flee-taunt message lacked a space after the monster name
      ("dragontakes one look..."); corrected to " takes".
    """
    print_pause("\nYou approach the door of the house.")
    print_pause("\nYou are about to knock when the door "
                "opens and out steps a " + option + ".")
    print_pause("\nEep! This is the " + option + "'s house!")
    print_pause("\nThe " + option + " attacks you!\n")
    if "sword" not in item:
        print_pause("You feel a bit under-prepared for this, "
                    "what with only having a tiny dagger.\n")
    while True:
        choice2 = input("Would you like to (1) fight or (2) "
                        "run away?")
        if choice2 == "1":
            if "sword" in item:  # was "sward" — typo made this unreachable
                print_pause("\nAs the " + option + " moves to attack, "
                            "you unsheath your new sword.")
                print_pause("\nThe Sword of Ogoroth shines brightly in "
                            "your hand as you brace yourself for the "
                            "attack.")
                print_pause("\nBut the " + option + " takes one look at "
                            "your shiny new toy and runs away!")
                print_pause("\nYou have rid the town of the " + option +
                            ". You are victorious!\n")
            else:
                print_pause("\nYou do your best...")
                print_pause("but your dagger is no match for the " +
                            option + ".")
                print_pause("\nYou have been defeated!\n")
            play_again()
            break
        if choice2 == "2":
            print_pause("\nYou run back into the field. "
                        "\nLuckily, you don't seem to have been "
                        "followed.\n")
            field(item, option)
            break
def field(item, option):
    """Present the field menu and route the player to the house or cave.

    Re-prompts until the player enters "1" or "2".
    """
    print_pause("Enter 1 to knock on the door of the house.")
    print_pause("Enter 2 to peer into the cave.")
    print_pause("What would you like to do?")
    # Map menu entries to their scene handlers instead of an if/elif chain.
    destinations = {"1": house, "2": cave}
    while True:
        selection = input("(Please enter 1 or 2.)\n")
        handler = destinations.get(selection)
        if handler is not None:
            handler(item, option)
            break
\"sward\" in item: print_pause(\"\\nAs the \" +", "to knock when the door \" \"opens and out steps a \" +", "before, and gotten all\" \" the good stuff. It's just an empty cave\"", "input(\"(Please enter 1 or 2.)\\n\") if choice1 == \"1\": house(item, option) break elif", "+ \". You are victorious!\\n\") else: print_pause(\"\\nYou do your best...\") print_pause(\"but your dagger", "elif again == \"n\": print_pause(\"\\n\\n\\nThanks for playing! See you next time.\\n\\n\\n\") else: play_again()", "the nearby village.\\n\") print_pause(\"In front of you is a house.\\n\") print_pause(\"To your right", "defeated!\\n\") play_again() break if choice2 == \"2\": print_pause(\"\\nYou run back into the field.", "town of the \" + option + \". You are victorious!\\n\") else: print_pause(\"\\nYou", "cave.\") print_pause(\"\\nIt turns out to be only a very small cave.\") print_pause(\"\\nYour eye", "while True: choice2 = input(\"Would you like to (1) fight or (2) \"", "\"sward\" in item: print_pause(\"\\nAs the \" + option + \" moves to attack,", "the cave.\") print_pause(\"\\nIt turns out to be only a very small cave.\") print_pause(\"\\nYour", "look at \" \"your shiny new toy and runs away!\") print_pause(\"\\nYou have rid", "do your best...\") print_pause(\"but your dagger is no match for the \" +", "option): if \"sword\" in item: print_pause(\"\\nYou peer cautiously into the cave.\") print_pause(\"\\nYou've been", "peer into the cave.\") print_pause(\"What would you like to do?\") while True: choice1", "def field(item, option): print_pause(\"Enter 1 to knock on the door of the house.\")", "your hand you hold your trusty (but not very \" \"effective) dagger.\\n\") def", "\" \"your hand as you brace yourself for the \" \"attack.\") print_pause(\"\\nBut the", "with you.\") print_pause(\"\\nYou walk back out to the field.\\n\") item.append(\"sword\") field(item, option) def", "item: print_pause(\"\\nYou peer cautiously into the cave.\") print_pause(\"\\nYou've been here before, 
and gotten", "been defeated!\\n\") play_again() break if choice2 == \"2\": print_pause(\"\\nYou run back into the", "about to knock when the door \" \"opens and out steps a \"", "moves to attack, \" \"you unsheath your new sword.\") print_pause(\"\\nThe Sword of Ogoroth", "\" moves to attack, \" \"you unsheath your new sword.\") print_pause(\"\\nThe Sword of", "very small cave.\") print_pause(\"\\nYour eye catches a glint of metal behind a \"", "the \" + option + \"takes one look at \" \"your shiny new", "\"opens and out steps a \" + option + \".\") print_pause(\"\\nEep! This is", "out to the field.\\n\") item.append(\"sword\") field(item, option) def house(item, option): print_pause(\"\\nYou approach the", "village.\\n\") print_pause(\"In front of you is a house.\\n\") print_pause(\"To your right is a", "here before, and gotten all\" \" the good stuff. It's just an empty", "print_pause(\"In front of you is a house.\\n\") print_pause(\"To your right is a dark", "field. \" \"\\nLuckily, you don't seem to have been \" \"followed.\\n\") field(item, option)", "dagger is no match for the \" + option + \".\") print_pause(\"\\nYou have", "\" \"effective) dagger.\\n\") def cave(item, option): if \"sword\" in item: print_pause(\"\\nYou peer cautiously", "option + \"'s house!\") print_pause(\"\\nThe \" + option + \" attacks you!\\n\") if", "def intro(item, option): print_pause(\"You find yourself standing in an open field, filled \"", "new toy and runs away!\") print_pause(\"\\nYou have rid the town of the \"", "\" + option + \"'s house!\") print_pause(\"\\nThe \" + option + \" attacks", "print_pause(\"\\nEep! 
This is the \" + option + \"'s house!\") print_pause(\"\\nThe \" +", "== \"2\": cave(item, option) break def play_again(): again = input(\"Would you like to", "to attack, \" \"you unsheath your new sword.\") print_pause(\"\\nThe Sword of Ogoroth shines", "have found the magical Sword of Ogoroth!\") print_pause(\"\\nYou discard your silly old dagger", "open field, filled \" \"with grass and yellow wildflowers.\\n\") print_pause(\"Rumor has it that", "seem to have been \" \"followed.\\n\") field(item, option) break def field(item, option): print_pause(\"Enter", "option + \".\") print_pause(\"\\nEep! This is the \" + option + \"'s house!\")", "cave\" \" now.\") print_pause(\"\\nYou walk back to the field.\\n\") else: print_pause(\"\\nYou peer cautiously", "== \"1\": if \"sward\" in item: print_pause(\"\\nAs the \" + option + \"", "or 2.)\\n\") if choice1 == \"1\": house(item, option) break elif choice1 == \"2\":", "option + \". You are victorious!\\n\") else: print_pause(\"\\nYou do your best...\") print_pause(\"but your", "print_pause(\"\\nYour eye catches a glint of metal behind a \" \"rock.\") print_pause(\"\\nYou have", "field(item, option) def house(item, option): print_pause(\"\\nYou approach the door of the house.\") print_pause(\"\\nYou", "playing! See you next time.\\n\\n\\n\") else: play_again() def play_game(): item = [] option", "to do?\") while True: choice1 = input(\"(Please enter 1 or 2.)\\n\") if choice1", "of you is a house.\\n\") print_pause(\"To your right is a dark cave.\\n\") print_pause(\"In", "+ option + \".\") print_pause(\"\\nEep! This is the \" + option + \"'s", "the good stuff. It's just an empty cave\" \" now.\") print_pause(\"\\nYou walk back", "the game ...\\n\\n\\n\") play_game() elif again == \"n\": print_pause(\"\\n\\n\\nThanks for playing! 
def play_game():
    """Start a fresh run: empty inventory, random monster, intro, field."""
    inventory = []
    enemy = random.choice(["pirate", "fairy", "dragon", "gorgon", "troll"])
    intro(inventory, enemy)
    field(inventory, enemy)


play_game()
[]
[ "template = TemplateFactory() attribute = AttributeFactory(template=template) rating = RatingFactory(attribute=attribute, rank=1) rating.clean() self.assertEqual(\"%s -", "import TemplateFactory, AttributeFactory, RatingFactory class RatingTestCases(TestCase): def test_creation_of_rating(self): template = TemplateFactory() attribute =", "import str from django.test import TestCase from measure_mate.tests.factories import TemplateFactory, AttributeFactory, RatingFactory class", "from builtins import str from django.test import TestCase from measure_mate.tests.factories import TemplateFactory, AttributeFactory,", "TemplateFactory, AttributeFactory, RatingFactory class RatingTestCases(TestCase): def test_creation_of_rating(self): template = TemplateFactory() attribute = AttributeFactory(template=template)", "measure_mate.tests.factories import TemplateFactory, AttributeFactory, RatingFactory class RatingTestCases(TestCase): def test_creation_of_rating(self): template = TemplateFactory() attribute", "= TemplateFactory() attribute = AttributeFactory(template=template) rating = RatingFactory(attribute=attribute, rank=1) rating.clean() self.assertEqual(\"%s - %s", "import TestCase from measure_mate.tests.factories import TemplateFactory, AttributeFactory, RatingFactory class RatingTestCases(TestCase): def test_creation_of_rating(self): template", "RatingFactory(attribute=attribute, rank=1) rating.clean() self.assertEqual(\"%s - %s - %s\" % (template.name, attribute.name, rating.name), str(rating))", "rating = RatingFactory(attribute=attribute, rank=1) rating.clean() self.assertEqual(\"%s - %s - %s\" % (template.name, attribute.name,", "builtins import str from django.test import TestCase from measure_mate.tests.factories import TemplateFactory, AttributeFactory, RatingFactory", "= RatingFactory(attribute=attribute, rank=1) rating.clean() self.assertEqual(\"%s - %s - %s\" % (template.name, attribute.name, rating.name),", "AttributeFactory(template=template) rating = 
RatingFactory(attribute=attribute, rank=1) rating.clean() self.assertEqual(\"%s - %s - %s\" % (template.name,", "RatingFactory class RatingTestCases(TestCase): def test_creation_of_rating(self): template = TemplateFactory() attribute = AttributeFactory(template=template) rating =", "RatingTestCases(TestCase): def test_creation_of_rating(self): template = TemplateFactory() attribute = AttributeFactory(template=template) rating = RatingFactory(attribute=attribute, rank=1)", "attribute = AttributeFactory(template=template) rating = RatingFactory(attribute=attribute, rank=1) rating.clean() self.assertEqual(\"%s - %s - %s\"", "django.test import TestCase from measure_mate.tests.factories import TemplateFactory, AttributeFactory, RatingFactory class RatingTestCases(TestCase): def test_creation_of_rating(self):", "AttributeFactory, RatingFactory class RatingTestCases(TestCase): def test_creation_of_rating(self): template = TemplateFactory() attribute = AttributeFactory(template=template) rating", "TestCase from measure_mate.tests.factories import TemplateFactory, AttributeFactory, RatingFactory class RatingTestCases(TestCase): def test_creation_of_rating(self): template =", "str from django.test import TestCase from measure_mate.tests.factories import TemplateFactory, AttributeFactory, RatingFactory class RatingTestCases(TestCase):", "class RatingTestCases(TestCase): def test_creation_of_rating(self): template = TemplateFactory() attribute = AttributeFactory(template=template) rating = RatingFactory(attribute=attribute,", "TemplateFactory() attribute = AttributeFactory(template=template) rating = RatingFactory(attribute=attribute, rank=1) rating.clean() self.assertEqual(\"%s - %s -", "from django.test import TestCase from measure_mate.tests.factories import TemplateFactory, AttributeFactory, RatingFactory class RatingTestCases(TestCase): def", "from measure_mate.tests.factories import TemplateFactory, AttributeFactory, RatingFactory class RatingTestCases(TestCase): def 
test_creation_of_rating(self): template = TemplateFactory()", "def test_creation_of_rating(self): template = TemplateFactory() attribute = AttributeFactory(template=template) rating = RatingFactory(attribute=attribute, rank=1) rating.clean()", "= AttributeFactory(template=template) rating = RatingFactory(attribute=attribute, rank=1) rating.clean() self.assertEqual(\"%s - %s - %s\" %", "test_creation_of_rating(self): template = TemplateFactory() attribute = AttributeFactory(template=template) rating = RatingFactory(attribute=attribute, rank=1) rating.clean() self.assertEqual(\"%s" ]
[ "1), coords], axis=-1) for cl in classes: cmask = R == cl smask", "Determine centroid of a region R in segmented image \"\"\" classes = np.unique(R)[1:]", "bbox[5]] = np.where( wss != 0, wss, R[bbox[0] : bbox[3], bbox[1] : bbox[4],", "numbers and means in voxel coordinates\"\"\" M = M.squeeze() S = S.squeeze() Sb", "seg_classes, seg_counts = np.unique(labels, return_counts=True) seg_classes = np.array( [seg_classes[i] for i in range(len(seg_classes))", "print(\"Segmenting again\") Rp = segment_nuclei( wss, species_bbox, intensity_bbox, it=it + 1, verbose=verbose, max_iters=max_iters,", "cl) if specie != 0: means.append(np.mean(smask[:, 1:], axis=0)) atoms.append(specie) return atoms, means def", "print(\"\\nIteration\", it) print(\"Classes\", seg_classes) print(\"Counts\", seg_counts) plot_points_3d(labels) for cl in seg_classes: if verbose:", "min_convexity: max_class = np.max(R) R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]]", "max_class] = 0 nclasses = len(np.unique(wss)) - 1 if verbose: print(\"WS\", it, np.unique(wss,", "max_iters ): if verbose: print(\"Segmenting again\") Rp = segment_nuclei( wss, species_bbox, intensity_bbox, it=it", "unique_counts if i[0] != 0] specie = unique_counts[-1][0] return specie def centroids(seg_img, R):", "S, M, max_iters=max_iters, verbose=verbose) atoms, means = centroids(S, R) if return_ws: return np.array(atoms),", "Rp + max_class # sub region with classes relabelled Rp[Rp == max_class] =", ": bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where( wss != 0,", "== cl smask = seg_img_coords[cmask] specie = majority_vote(seg_img, R, cl) if specie !=", "return_counts=True) unique_counts = sorted(list(zip(unique, counts)), key=lambda x: x[1]) unique_counts = [i for i", "in a region R in segmented image\"\"\" binary_label_map = np.where(R == cl, seg_img,", "return 0 unique, counts = np.unique(binary_label_map, return_counts=True) unique_counts = sorted(list(zip(unique, counts)), key=lambda x:", "erode: return S 
else: kernel = morphology.ball(kernel_size) return morphology.erosion(S, kernel) def crop(a, bbox):", "0) intensity_cl = np.where(labels == cl, intensity, 0) species_cl = np.where(labels == cl,", "bbox[5]] ) else: R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] =", "as plt import numpy as np from scipy.ndimage.morphology import distance_transform_edt from skimage import", "segment_nuclei( wss, species_bbox, intensity_bbox, it=it + 1, verbose=verbose, max_iters=max_iters, min_convexity=min_convexity, ) max_class =", "): if verbose: print(\"Segmenting again\") Rp = segment_nuclei( wss, species_bbox, intensity_bbox, it=it +", "morphology.convex_hull_image(binary_bbox) convexity = np.count_nonzero(binary_bbox) / np.count_nonzero(chull) if verbose: print(\"Convexity:\", convexity) if convexity >=", "np.array(list(product(xc, yc, zc))).reshape(32, 32, 32, 3) seg_img_coords = np.concatenate([seg_img.reshape(32, 32, 32, 1), coords],", "if verbose: print(\"Segmenting\") plot_points_3d(fg) # Markers for ws markers = measure.label(fg) markers +=", "1:], axis=0)) atoms.append(specie) return atoms, means def watershed_clustering(M, S, Sb, max_iters=5, return_ws=False, verbose=False):", "= seg_classes[seg_classes != 0] if verbose: print(\"\\nIteration\", it) print(\"Classes\", seg_classes) print(\"Counts\", seg_counts) plot_points_3d(labels)", "Matrix for storing result R = np.zeros(binary.shape) binary = binary.astype(int) # 1. 
Label", "means in voxel coordinates\"\"\" M = M.squeeze() S = S.squeeze() Sb = Sb.squeeze()", "density/species matrices Returns the atom z numbers and means in voxel coordinates\"\"\" M", "bbox[1] : bbox[4], bbox[2] : bbox[5]], ) continue # Get the foreground, bg", "def crop(a, bbox): return a[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]]", "+ 1)[:-1] yc = np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1] zc = np.linspace(0, R.shape[0],", "import matplotlib.pyplot as plt import numpy as np from scipy.ndimage.morphology import distance_transform_edt from", "morphology.ball(kernel_size) return morphology.erosion(S, kernel) def crop(a, bbox): return a[bbox[0] : bbox[3], bbox[1] :", "region with classes relabelled wss[wss == max_class] = 0 nclasses = len(np.unique(wss)) -", "np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1] coords = np.array(list(product(xc, yc, zc))).reshape(32, 32, 32, 3)", "seg_classes: if verbose: print(\"Class\", cl) # Crop the images binary_cl = np.where(labels ==", "= binary.astype(int) # 1. Label the connected components labels = measure.label(binary, connectivity=1) seg_classes,", "np.count_nonzero(chull) if verbose: print(\"Convexity:\", convexity) if convexity >= min_convexity: max_class = np.max(R) R[bbox[0]", "np.zeros(binary.shape) binary = binary.astype(int) # 1. 
Label the connected components labels = measure.label(binary,", "the atom z numbers and means in voxel coordinates\"\"\" M = M.squeeze() S", "it < max_iters ): if verbose: print(\"Segmenting again\") Rp = segment_nuclei( wss, species_bbox,", "voxel coordinates\"\"\" M = M.squeeze() S = S.squeeze() Sb = Sb.squeeze() R =", ": bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] def segment_nuclei( binary, species, intensity,", "in range(len(seg_classes)) if seg_counts[i] > 3] ) seg_classes = seg_classes[seg_classes != 0] if", ": bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]], ) if verbose: print(it, np.unique(R,", "S else: kernel = morphology.ball(kernel_size) return morphology.erosion(S, kernel) def crop(a, bbox): return a[bbox[0]", "bbox[2] : bbox[5]] = np.where( wss != 0, wss, R[bbox[0] : bbox[3], bbox[1]", "0).astype(int) if np.count_nonzero(binary_label_map) == 0: return 0 unique, counts = np.unique(binary_label_map, return_counts=True) unique_counts", "range(len(seg_classes)) if seg_counts[i] > 3] ) seg_classes = seg_classes[seg_classes != 0] if verbose:", "1 markers[unknown == 1] = 0 # WS wss = segmentation.watershed(binary_bbox, markers) wss[wss", "= region[0].bbox binary_bbox = crop(binary_cl, bbox) intensity_bbox = crop(intensity_cl, bbox) species_bbox = crop(species_cl,", "bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where( wss != 0, wss, R[bbox[0]", ": bbox[4], bbox[2] : bbox[5]] = np.where( wss != 0, wss, R[bbox[0] :", "the connected components labels = measure.label(binary, connectivity=1) seg_classes, seg_counts = np.unique(labels, return_counts=True) seg_classes", "np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1] yc = np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1] zc", "# Determine wether or not to erode fg = get_foreground(binary_bbox) bg = get_background(binary_bbox)", "1)[:-1] yc = np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1] zc = np.linspace(0, R.shape[0], R.shape[0]", "seg_counts) plot_points_3d(labels) for cl in seg_classes: if verbose: 
print(\"Class\", cl) # Crop the", "max_class # sub region with classes relabelled wss[wss == max_class] = 0 nclasses", "intensity, 0) species_cl = np.where(labels == cl, species, 0) region = measure.regionprops(binary_cl, intensity_cl)", "== 1] = 0 # WS wss = segmentation.watershed(binary_bbox, markers) wss[wss == 1]", "plot_points_3d(fg) # Markers for ws markers = measure.label(fg) markers += 1 markers[unknown ==", "= morphology.ball(kernel_size) return morphology.erosion(S, kernel) def crop(a, bbox): return a[bbox[0] : bbox[3], bbox[1]", "classes: cmask = R == cl smask = seg_img_coords[cmask] specie = majority_vote(seg_img, R,", "R): \"\"\" Determine centroid of a region R in segmented image \"\"\" classes", "image \"\"\" classes = np.unique(R)[1:] atoms = [] means = [] xc =", "= M.squeeze() S = S.squeeze() Sb = Sb.squeeze() R = segment_nuclei(Sb, S, M,", "seg_img, 0).astype(int) if np.count_nonzero(binary_label_map) == 0: return 0 unique, counts = np.unique(binary_label_map, return_counts=True)", "recursive watershed segmentation \"\"\" # Matrix for storing result R = np.zeros(binary.shape) binary", "print(\"Segmenting\") plot_points_3d(fg) # Markers for ws markers = measure.label(fg) markers += 1 markers[unknown", "unique_counts = sorted(list(zip(unique, counts)), key=lambda x: x[1]) unique_counts = [i for i in", ": bbox[4], bbox[2] : bbox[5]] = np.where( binary_bbox == cl, max_class + 1,", "intensity_cl) bbox = region[0].bbox binary_bbox = crop(binary_cl, bbox) intensity_bbox = crop(intensity_cl, bbox) species_bbox", "unique_counts = [i for i in unique_counts if i[0] != 0] specie =", "np.max(R) Rp = Rp + max_class # sub region with classes relabelled Rp[Rp", "= Rp + max_class # sub region with classes relabelled Rp[Rp == max_class]", "max_iters=5, min_convexity=0.8, verbose=False, ): \"\"\" Computes segmented form of species matrix using recursive", "verbose=verbose) atoms, means = centroids(S, R) if return_ws: return np.array(atoms), np.array(means), R 
else:", "nclasses = len(np.unique(wss)) - 1 if verbose: print(\"WS\", it, np.unique(wss, return_counts=True)) plot_points_3d(wss) print(int(np.count_nonzero(wss)", "= np.where(labels == cl, labels, 0) intensity_cl = np.where(labels == cl, intensity, 0)", ": bbox[5]] = np.where( binary_bbox == cl, max_class + 1, R[bbox[0] : bbox[3],", "species_bbox, intensity_bbox, it=it + 1, verbose=verbose, max_iters=max_iters, min_convexity=min_convexity, ) max_class = np.max(R) Rp", "= np.where( binary_bbox == cl, max_class + 1, R[bbox[0] : bbox[3], bbox[1] :", "Rp = Rp + max_class # sub region with classes relabelled Rp[Rp ==", "0 unique, counts = np.unique(binary_label_map, return_counts=True) unique_counts = sorted(list(zip(unique, counts)), key=lambda x: x[1])", "+ 1)[:-1] coords = np.array(list(product(xc, yc, zc))).reshape(32, 32, 32, 3) seg_img_coords = np.concatenate([seg_img.reshape(32,", "32, 3) seg_img_coords = np.concatenate([seg_img.reshape(32, 32, 32, 1), coords], axis=-1) for cl in", "np.where( wss != 0, wss, R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] :", "------------------------------------------------- \"\"\" from itertools import product import matplotlib.pyplot as plt import numpy as", "np.unique(labels, return_counts=True) seg_classes = np.array( [seg_classes[i] for i in range(len(seg_classes)) if seg_counts[i] >", "bbox[5]] = np.where( Rp != 0, Rp, R[bbox[0] : bbox[3], bbox[1] : bbox[4],", "for i in unique_counts if i[0] != 0] specie = unique_counts[-1][0] return specie", "region R in segmented image\"\"\" binary_label_map = np.where(R == cl, seg_img, 0).astype(int) if", "!= 0] specie = unique_counts[-1][0] return specie def centroids(seg_img, R): \"\"\" Determine centroid", "the density/species matrices Returns the atom z numbers and means in voxel coordinates\"\"\"", "else: kernel = morphology.ball(kernel_size) return morphology.erosion(S, kernel) def crop(a, bbox): return a[bbox[0] :", "## License: MIT ## Copyright: Copyright <NAME> & <NAME> 2020, 
ICSG3D ------------------------------------------------- \"\"\"", "with classes relabelled Rp[Rp == max_class] = 0 R[bbox[0] : bbox[3], bbox[1] :", "R = np.zeros(binary.shape) binary = binary.astype(int) # 1. Label the connected components labels", "labels = measure.label(binary, connectivity=1) seg_classes, seg_counts = np.unique(labels, return_counts=True) seg_classes = np.array( [seg_classes[i]", "== cl, labels, 0) intensity_cl = np.where(labels == cl, intensity, 0) species_cl =", "atoms = [] means = [] xc = np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1]", "Crop the images binary_cl = np.where(labels == cl, labels, 0) intensity_cl = np.where(labels", "relabelled wss[wss == max_class] = 0 nclasses = len(np.unique(wss)) - 1 if verbose:", "= np.zeros(binary.shape) binary = binary.astype(int) # 1. Label the connected components labels =", "from skimage import filters, measure, morphology, segmentation from viz import plot_points_3d def get_background(S,", "for i in range(len(seg_classes)) if seg_counts[i] > 3] ) seg_classes = seg_classes[seg_classes !=", "convexity = np.count_nonzero(binary_bbox) / np.count_nonzero(chull) if verbose: print(\"Convexity:\", convexity) if convexity >= min_convexity:", "verbose: print(\"Convexity:\", convexity) if convexity >= min_convexity: max_class = np.max(R) R[bbox[0] : bbox[3],", "= np.max(R) R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where(", "/ wmin) > len(np.unique(wss)) - 1 and it < max_iters ): if verbose:", "vote of class cl in a region R in segmented image\"\"\" binary_label_map =", "xc = np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1] yc = np.linspace(0, R.shape[0], R.shape[0] +", ": bbox[4], bbox[2] : bbox[5]], ) continue # Get the foreground, bg etc.", "of species matrix using recursive watershed segmentation \"\"\" # Matrix for storing result", "wether or not to erode fg = get_foreground(binary_bbox) bg = get_background(binary_bbox) unknown =", "return_counts=True)) plot_points_3d(wss) 
print(int(np.count_nonzero(wss) / wmin), nclasses) # Determine wether or not to segment", "max_class = np.max(R) Rp = Rp + max_class # sub region with classes", "of class cl in a region R in segmented image\"\"\" binary_label_map = np.where(R", "cl, labels, 0) intensity_cl = np.where(labels == cl, intensity, 0) species_cl = np.where(labels", "[] xc = np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1] yc = np.linspace(0, R.shape[0], R.shape[0]", "specie = unique_counts[-1][0] return specie def centroids(seg_img, R): \"\"\" Determine centroid of a", "in seg_classes: if verbose: print(\"Class\", cl) # Crop the images binary_cl = np.where(labels", "2020, ICSG3D ------------------------------------------------- \"\"\" from itertools import product import matplotlib.pyplot as plt import", "storing result R = np.zeros(binary.shape) binary = binary.astype(int) # 1. Label the connected", "def majority_vote(seg_img, R, cl): \"\"\" Majority vote of class cl in a region", "means = centroids(S, R) if return_ws: return np.array(atoms), np.array(means), R else: return np.array(atoms),", "chull = morphology.convex_hull_image(binary_bbox) convexity = np.count_nonzero(binary_bbox) / np.count_nonzero(chull) if verbose: print(\"Convexity:\", convexity) if", "filters, measure, morphology, segmentation from viz import plot_points_3d def get_background(S, kernel_size=1): kernel =", "= get_background(binary_bbox) unknown = bg - fg if verbose: print(\"Segmenting\") plot_points_3d(fg) # Markers", "nclasses) # Determine wether or not to segment again on the basis of", "- 1 and it < max_iters ): if verbose: print(\"Segmenting again\") Rp =", "if verbose: print(it, np.unique(R, return_counts=True)) return R def majority_vote(seg_img, R, cl): \"\"\" Majority", "= 0 max_class = np.max(R) wss = wss + max_class # sub region", "it, np.unique(wss, return_counts=True)) plot_points_3d(wss) print(int(np.count_nonzero(wss) / wmin), nclasses) # Determine wether or not", "intensity_cl = np.where(labels == 
cl, intensity, 0) species_cl = np.where(labels == cl, species,", "== cl, species, 0) region = measure.regionprops(binary_cl, intensity_cl) bbox = region[0].bbox binary_bbox =", "product import matplotlib.pyplot as plt import numpy as np from scipy.ndimage.morphology import distance_transform_edt", "print(\"Class\", cl) # Crop the images binary_cl = np.where(labels == cl, labels, 0)", "markers[unknown == 1] = 0 # WS wss = segmentation.watershed(binary_bbox, markers) wss[wss ==", "wss = wss + max_class # sub region with classes relabelled wss[wss ==", "cl, species, 0) region = measure.regionprops(binary_cl, intensity_cl) bbox = region[0].bbox binary_bbox = crop(binary_cl,", "x[1]) unique_counts = [i for i in unique_counts if i[0] != 0] specie", "bbox[4], bbox[2] : bbox[5]] def segment_nuclei( binary, species, intensity, wmin=8, it=1, max_iters=5, min_convexity=0.8,", "classes = np.unique(R)[1:] atoms = [] means = [] xc = np.linspace(0, R.shape[0],", "from viz import plot_points_3d def get_background(S, kernel_size=1): kernel = morphology.ball(kernel_size) return morphology.dilation(S, kernel)", "atom z numbers and means in voxel coordinates\"\"\" M = M.squeeze() S =", "cl) # Crop the images binary_cl = np.where(labels == cl, labels, 0) intensity_cl", "## Version: 1.0.0 -------------------------------------------------- ## License: MIT ## Copyright: Copyright <NAME> & <NAME>", "wss[wss == max_class] = 0 nclasses = len(np.unique(wss)) - 1 if verbose: print(\"WS\",", "counts = np.unique(binary_label_map, return_counts=True) unique_counts = sorted(list(zip(unique, counts)), key=lambda x: x[1]) unique_counts =", "seg_classes) print(\"Counts\", seg_counts) plot_points_3d(labels) for cl in seg_classes: if verbose: print(\"Class\", cl) #", "region[0].bbox binary_bbox = crop(binary_cl, bbox) intensity_bbox = crop(intensity_cl, bbox) species_bbox = crop(species_cl, bbox)", "\"\"\" Majority vote of class cl in a region R in segmented image\"\"\"", "= seg_img_coords[cmask] 
specie = majority_vote(seg_img, R, cl) if specie != 0: means.append(np.mean(smask[:, 1:],", "= [i for i in unique_counts if i[0] != 0] specie = unique_counts[-1][0]", "morphology, segmentation from viz import plot_points_3d def get_background(S, kernel_size=1): kernel = morphology.ball(kernel_size) return", "and object counts if ( int(np.count_nonzero(wss) / wmin) > len(np.unique(wss)) - 1 and", "bbox[4], bbox[2] : bbox[5]], ) continue # Get the foreground, bg etc. #", "+= 1 markers[unknown == 1] = 0 # WS wss = segmentation.watershed(binary_bbox, markers)", "+ max_class # sub region with classes relabelled Rp[Rp == max_class] = 0", "verbose: print(it, np.unique(R, return_counts=True)) return R def majority_vote(seg_img, R, cl): \"\"\" Majority vote", "bbox[1] : bbox[4], bbox[2] : bbox[5]], ) if verbose: print(it, np.unique(R, return_counts=True)) return", "segmentation \"\"\" # Matrix for storing result R = np.zeros(binary.shape) binary = binary.astype(int)", "= morphology.convex_hull_image(binary_bbox) convexity = np.count_nonzero(binary_bbox) / np.count_nonzero(chull) if verbose: print(\"Convexity:\", convexity) if convexity", "R == cl smask = seg_img_coords[cmask] specie = majority_vote(seg_img, R, cl) if specie", "+ 1, verbose=verbose, max_iters=max_iters, min_convexity=min_convexity, ) max_class = np.max(R) Rp = Rp +", "print(\"Counts\", seg_counts) plot_points_3d(labels) for cl in seg_classes: if verbose: print(\"Class\", cl) # Crop", "verbose: print(\"Class\", cl) # Crop the images binary_cl = np.where(labels == cl, labels,", "return morphology.dilation(S, kernel) def get_foreground(S, kernel_size=1, erode=True): if not erode: return S else:", "in unique_counts if i[0] != 0] specie = unique_counts[-1][0] return specie def centroids(seg_img,", "/ np.count_nonzero(chull) if verbose: print(\"Convexity:\", convexity) if convexity >= min_convexity: max_class = np.max(R)", "0, Rp, R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] ) else:", "= [] means = 
[] xc = np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1] yc", "foreground, bg etc. # Determine wether or not to erode fg = get_foreground(binary_bbox)", "x: x[1]) unique_counts = [i for i in unique_counts if i[0] != 0]", "atoms, means = centroids(S, R) if return_ws: return np.array(atoms), np.array(means), R else: return", "bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where( binary_bbox == cl, max_class +", "species_bbox = crop(species_cl, bbox) chull = morphology.convex_hull_image(binary_bbox) convexity = np.count_nonzero(binary_bbox) / np.count_nonzero(chull) if", "np from scipy.ndimage.morphology import distance_transform_edt from skimage import filters, measure, morphology, segmentation from", "intensity, wmin=8, it=1, max_iters=5, min_convexity=0.8, verbose=False, ): \"\"\" Computes segmented form of species", "region = measure.regionprops(binary_cl, intensity_cl) bbox = region[0].bbox binary_bbox = crop(binary_cl, bbox) intensity_bbox =", "in segmented image \"\"\" classes = np.unique(R)[1:] atoms = [] means = []", "convexity >= min_convexity: max_class = np.max(R) R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2]", "[] means = [] xc = np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1] yc =", "max_class = np.max(R) wss = wss + max_class # sub region with classes", "wss = segmentation.watershed(binary_bbox, markers) wss[wss == 1] = 0 max_class = np.max(R) wss", "- 1 if verbose: print(\"WS\", it, np.unique(wss, return_counts=True)) plot_points_3d(wss) print(int(np.count_nonzero(wss) / wmin), nclasses)", "get_background(binary_bbox) unknown = bg - fg if verbose: print(\"Segmenting\") plot_points_3d(fg) # Markers for", "in segmented image\"\"\" binary_label_map = np.where(R == cl, seg_img, 0).astype(int) if np.count_nonzero(binary_label_map) ==", "skimage import filters, measure, morphology, segmentation from viz import plot_points_3d def get_background(S, kernel_size=1):", "= np.concatenate([seg_img.reshape(32, 32, 32, 1), coords], axis=-1) for cl in classes: cmask =", 
": bbox[5]] ) else: R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]]", "kernel = morphology.ball(kernel_size) return morphology.dilation(S, kernel) def get_foreground(S, kernel_size=1, erode=True): if not erode:", "and means in voxel coordinates\"\"\" M = M.squeeze() S = S.squeeze() Sb =", "Computes segmented form of species matrix using recursive watershed segmentation \"\"\" # Matrix", "specie = majority_vote(seg_img, R, cl) if specie != 0: means.append(np.mean(smask[:, 1:], axis=0)) atoms.append(specie)", "bbox): return a[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] def segment_nuclei(", ": bbox[5]] def segment_nuclei( binary, species, intensity, wmin=8, it=1, max_iters=5, min_convexity=0.8, verbose=False, ):", "seg_counts[i] > 3] ) seg_classes = seg_classes[seg_classes != 0] if verbose: print(\"\\nIteration\", it)", "bbox[2] : bbox[5]] def segment_nuclei( binary, species, intensity, wmin=8, it=1, max_iters=5, min_convexity=0.8, verbose=False,", "\"\"\" # Matrix for storing result R = np.zeros(binary.shape) binary = binary.astype(int) #", "basis of convexity and object counts if ( int(np.count_nonzero(wss) / wmin) > len(np.unique(wss))", "for storing result R = np.zeros(binary.shape) binary = binary.astype(int) # 1. Label the", "segment again on the basis of convexity and object counts if ( int(np.count_nonzero(wss)", "R in segmented image\"\"\" binary_label_map = np.where(R == cl, seg_img, 0).astype(int) if np.count_nonzero(binary_label_map)", "= np.unique(R)[1:] atoms = [] means = [] xc = np.linspace(0, R.shape[0], R.shape[0]", "bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] ) else: R[bbox[0] : bbox[3], bbox[1]", "np.where( binary_bbox == cl, max_class + 1, R[bbox[0] : bbox[3], bbox[1] : bbox[4],", "etc. 
# Determine wether or not to erode fg = get_foreground(binary_bbox) bg =", "# Determine wether or not to segment again on the basis of convexity", "+ max_class # sub region with classes relabelled wss[wss == max_class] = 0", "connectivity=1) seg_classes, seg_counts = np.unique(labels, return_counts=True) seg_classes = np.array( [seg_classes[i] for i in", "R.shape[0], R.shape[0] + 1)[:-1] yc = np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1] zc =", ") max_class = np.max(R) Rp = Rp + max_class # sub region with", "fg = get_foreground(binary_bbox) bg = get_background(binary_bbox) unknown = bg - fg if verbose:", "1, verbose=verbose, max_iters=max_iters, min_convexity=min_convexity, ) max_class = np.max(R) Rp = Rp + max_class", "R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where( wss !=", "\"\"\" ## Functions for computing watershed segmentation -------------------------------------------------- ## Author: <NAME>. ## Email:", "License: MIT ## Copyright: Copyright <NAME> & <NAME> 2020, ICSG3D ------------------------------------------------- \"\"\" from", "axis=-1) for cl in classes: cmask = R == cl smask = seg_img_coords[cmask]", "= R == cl smask = seg_img_coords[cmask] specie = majority_vote(seg_img, R, cl) if", "== cl, seg_img, 0).astype(int) if np.count_nonzero(binary_label_map) == 0: return 0 unique, counts =", "max_class + 1, R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]], )", "( int(np.count_nonzero(wss) / wmin) > len(np.unique(wss)) - 1 and it < max_iters ):", "np.unique(binary_label_map, return_counts=True) unique_counts = sorted(list(zip(unique, counts)), key=lambda x: x[1]) unique_counts = [i for", "Email: <EMAIL> ## Version: 1.0.0 -------------------------------------------------- ## License: MIT ## Copyright: Copyright <NAME>", "<NAME> & <NAME> 2020, ICSG3D ------------------------------------------------- \"\"\" from itertools import product import matplotlib.pyplot", "binary, species, intensity, wmin=8, it=1, max_iters=5, 
min_convexity=0.8, verbose=False, ): \"\"\" Computes segmented form", "or not to segment again on the basis of convexity and object counts", "R, cl): \"\"\" Majority vote of class cl in a region R in", "form of species matrix using recursive watershed segmentation \"\"\" # Matrix for storing", "bbox) intensity_bbox = crop(intensity_cl, bbox) species_bbox = crop(species_cl, bbox) chull = morphology.convex_hull_image(binary_bbox) convexity", "print(\"WS\", it, np.unique(wss, return_counts=True)) plot_points_3d(wss) print(int(np.count_nonzero(wss) / wmin), nclasses) # Determine wether or", "segmented image\"\"\" binary_label_map = np.where(R == cl, seg_img, 0).astype(int) if np.count_nonzero(binary_label_map) == 0:", "bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where( binary_bbox == cl, max_class", "and species of atoms in the density/species matrices Returns the atom z numbers", "get_background(S, kernel_size=1): kernel = morphology.ball(kernel_size) return morphology.dilation(S, kernel) def get_foreground(S, kernel_size=1, erode=True): if", "Author: <NAME>. ## Email: <EMAIL> ## Version: 1.0.0 -------------------------------------------------- ## License: MIT ##", "= np.where( wss != 0, wss, R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2]", "binary = binary.astype(int) # 1. 
def get_background(S, kernel_size=1):
    """Return a sure-background mask by dilating the binary volume ``S``.

    The dilation uses a spherical structuring element of radius
    ``kernel_size``; the result is later combined with the eroded
    foreground to find the unknown region for watershed markers.
    """
    ball = morphology.ball(kernel_size)
    return morphology.dilation(S, ball)
def get_foreground(S, kernel_size=1, erode=True):
    """Return a sure-foreground mask by eroding the binary volume ``S``.

    When ``erode`` is False the input is returned unchanged, which lets
    callers skip the erosion step for already-tight masks.
    """
    if not erode:
        return S
    ball = morphology.ball(kernel_size)
    return morphology.erosion(S, ball)
def crop(a, bbox):
    """Slice 3-D array ``a`` down to the bounding box.

    ``bbox`` follows the skimage ``regionprops`` convention:
    (min0, min1, min2, max0, max1, max2), with max bounds exclusive.
    """
    lo0, lo1, lo2, hi0, hi1, hi2 = bbox
    return a[lo0:hi0, lo1:hi1, lo2:hi2]
def segment_nuclei(
    binary,
    species,
    intensity,
    wmin=8,
    it=1,
    max_iters=5,
    min_convexity=0.8,
    verbose=False,
):
    """ Computes segmented form of species matrix using recursive watershed segmentation

    Args:
        binary: 3-D binary occupancy matrix to segment.
        species: 3-D matrix of species labels (same shape as ``binary``).
        intensity: 3-D density/intensity matrix (same shape as ``binary``).
        wmin: minimum voxel count per expected object; used to decide
            whether a watershed result should be split further.
        it: current recursion depth (starts at 1).
        max_iters: maximum recursion depth.
        min_convexity: components whose volume / convex-hull-volume ratio
            is at least this value are accepted as single objects without
            watershed splitting.
        verbose: print and plot diagnostic output at each step.

    Returns:
        Array of ``binary.shape`` where each detected object carries a
        unique positive integer label (0 = background).
    """
    # Matrix for storing result
    R = np.zeros(binary.shape)
    binary = binary.astype(int)
    # 1. Label the connected components
    labels = measure.label(binary, connectivity=1)
    seg_classes, seg_counts = np.unique(labels, return_counts=True)
    # Keep only components larger than 3 voxels and drop background (0)
    seg_classes = np.array(
        [seg_classes[i] for i in range(len(seg_classes)) if seg_counts[i] > 3]
    )
    seg_classes = seg_classes[seg_classes != 0]
    if verbose:
        print("\nIteration", it)
        print("Classes", seg_classes)
        print("Counts", seg_counts)
        plot_points_3d(labels)
    for cl in seg_classes:
        if verbose:
            print("Class", cl)
        # Crop the images
        binary_cl = np.where(labels == cl, labels, 0)
        intensity_cl = np.where(labels == cl, intensity, 0)
        species_cl = np.where(labels == cl, species, 0)
        region = measure.regionprops(binary_cl, intensity_cl)
        bbox = region[0].bbox
        binary_bbox = crop(binary_cl, bbox)
        intensity_bbox = crop(intensity_cl, bbox)
        species_bbox = crop(species_cl, bbox)
        # Convexity = occupied volume / convex hull volume; a convex blob
        # is assumed to be a single object and is not split further.
        chull = morphology.convex_hull_image(binary_bbox)
        convexity = np.count_nonzero(binary_bbox) / np.count_nonzero(chull)
        if verbose:
            print("Convexity:", convexity)
        if convexity >= min_convexity:
            # Accept the whole component as one object with a fresh label
            max_class = np.max(R)
            R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where(
                binary_bbox == cl,
                max_class + 1,
                R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]],
            )
            continue
        # Get the foreground, bg etc.
        # Determine whether or not to erode
        fg = get_foreground(binary_bbox)
        bg = get_background(binary_bbox)
        unknown = bg - fg  # voxels the watershed must assign
        if verbose:
            print("Segmenting")
            plot_points_3d(fg)
        # Markers for ws: labels shifted by 1 so background becomes 1,
        # then the unknown band is zeroed (watershed fills zeros)
        markers = measure.label(fg)
        markers += 1
        markers[unknown == 1] = 0
        # WS
        wss = segmentation.watershed(binary_bbox, markers)
        wss[wss == 1] = 0  # label 1 is background; discard it
        max_class = np.max(R)
        wss = wss + max_class  # sub region with classes relabelled
        wss[wss == max_class] = 0
        nclasses = len(np.unique(wss)) - 1
        if verbose:
            print("WS", it, np.unique(wss, return_counts=True))
            plot_points_3d(wss)
            print(int(np.count_nonzero(wss) / wmin), nclasses)
        # Determine whether or not to segment again on the basis of
        # convexity and object counts
        if (
            int(np.count_nonzero(wss) / wmin) > len(np.unique(wss)) - 1
            and it < max_iters
        ):
            if verbose:
                print("Segmenting again")
            # Recurse on the watershed output of this sub-region
            Rp = segment_nuclei(
                wss,
                species_bbox,
                intensity_bbox,
                it=it + 1,
                verbose=verbose,
                max_iters=max_iters,
                min_convexity=min_convexity,
            )
            max_class = np.max(R)
            Rp = Rp + max_class  # sub region with classes relabelled
            Rp[Rp == max_class] = 0
            R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where(
                Rp != 0, Rp, R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]]
            )
        else:
            # Accept the watershed labels for this sub-region as final
            R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where(
                wss != 0,
                wss,
                R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]],
            )
    if verbose:
        print(it, np.unique(R, return_counts=True))
    return R
foreground, bg etc. # Determine wether or not", "\"\"\" Computes segmented form of species matrix using recursive watershed segmentation \"\"\" #", "< max_iters ): if verbose: print(\"Segmenting again\") Rp = segment_nuclei( wss, species_bbox, intensity_bbox,", "matrix using recursive watershed segmentation \"\"\" # Matrix for storing result R =", "= len(np.unique(wss)) - 1 if verbose: print(\"WS\", it, np.unique(wss, return_counts=True)) plot_points_3d(wss) print(int(np.count_nonzero(wss) /", ">= min_convexity: max_class = np.max(R) R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] :", "key=lambda x: x[1]) unique_counts = [i for i in unique_counts if i[0] !=", "# sub region with classes relabelled wss[wss == max_class] = 0 nclasses =", "kernel_size=1): kernel = morphology.ball(kernel_size) return morphology.dilation(S, kernel) def get_foreground(S, kernel_size=1, erode=True): if not", "bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where( wss != 0, wss,", "int(np.count_nonzero(wss) / wmin) > len(np.unique(wss)) - 1 and it < max_iters ): if", "## Author: <NAME>. 
def centroids(seg_img, R):
    """Determine the species and centroid of each labelled region of ``R``.

    Args:
        seg_img: species matrix; assumed to be a cubic ``n x n x n`` volume
            matching ``R`` (only ``R.shape[0]`` is consulted — TODO confirm
            callers always pass cubic grids).
        R: integer segmentation map, 0 = background.

    Returns:
        (atoms, means): list of species values (one per non-empty region)
        and list of 3-vector centroids in voxel coordinates.
    """
    classes = np.unique(R)[1:]  # skip background label 0
    atoms = []
    means = []
    # Generalized: previously the grid size 32 was hard-coded in the
    # reshapes below, crashing on any non-32^3 input.
    n = R.shape[0]
    # Voxel-centre coordinates 0.0, 1.0, ..., n-1 along each axis
    axis = np.arange(n, dtype=float)
    coords = np.array(list(product(axis, axis, axis))).reshape(n, n, n, 3)
    # Channel 0 = species value, channels 1..3 = voxel coordinates
    seg_img_coords = np.concatenate([seg_img.reshape(n, n, n, 1), coords], axis=-1)
    for cl in classes:
        cmask = R == cl
        smask = seg_img_coords[cmask]
        specie = majority_vote(seg_img, R, cl)
        if specie != 0:
            # Centroid is the mean of the coordinate channels
            means.append(np.mean(smask[:, 1:], axis=0))
            atoms.append(specie)
    return atoms, means
min_convexity=min_convexity, )", "bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]], ) continue # Get the foreground,", "measure.label(fg) markers += 1 markers[unknown == 1] = 0 # WS wss =", "atoms, means def watershed_clustering(M, S, Sb, max_iters=5, return_ws=False, verbose=False): \"\"\"Determine centroids and species", "<NAME> 2020, ICSG3D ------------------------------------------------- \"\"\" from itertools import product import matplotlib.pyplot as plt", "z numbers and means in voxel coordinates\"\"\" M = M.squeeze() S = S.squeeze()", "0) species_cl = np.where(labels == cl, species, 0) region = measure.regionprops(binary_cl, intensity_cl) bbox", "= np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1] yc = np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1]", "<NAME>. ## Email: <EMAIL> ## Version: 1.0.0 -------------------------------------------------- ## License: MIT ## Copyright:", "= S.squeeze() Sb = Sb.squeeze() R = segment_nuclei(Sb, S, M, max_iters=max_iters, verbose=verbose) atoms,", "0) region = measure.regionprops(binary_cl, intensity_cl) bbox = region[0].bbox binary_bbox = crop(binary_cl, bbox) intensity_bbox", "cl, intensity, 0) species_cl = np.where(labels == cl, species, 0) region = measure.regionprops(binary_cl,", ": bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]], ) continue # Get the", "bbox[5]], ) continue # Get the foreground, bg etc. 
# Determine wether or", "= Sb.squeeze() R = segment_nuclei(Sb, S, M, max_iters=max_iters, verbose=verbose) atoms, means = centroids(S,", "object counts if ( int(np.count_nonzero(wss) / wmin) > len(np.unique(wss)) - 1 and it", "wss != 0, wss, R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]],", "Markers for ws markers = measure.label(fg) markers += 1 markers[unknown == 1] =", "it=it + 1, verbose=verbose, max_iters=max_iters, min_convexity=min_convexity, ) max_class = np.max(R) Rp = Rp", "the basis of convexity and object counts if ( int(np.count_nonzero(wss) / wmin) >", "= np.unique(binary_label_map, return_counts=True) unique_counts = sorted(list(zip(unique, counts)), key=lambda x: x[1]) unique_counts = [i", "verbose=False, ): \"\"\" Computes segmented form of species matrix using recursive watershed segmentation", "1)[:-1] coords = np.array(list(product(xc, yc, zc))).reshape(32, 32, 32, 3) seg_img_coords = np.concatenate([seg_img.reshape(32, 32,", "of atoms in the density/species matrices Returns the atom z numbers and means", "i[0] != 0] specie = unique_counts[-1][0] return specie def centroids(seg_img, R): \"\"\" Determine", "= np.array( [seg_classes[i] for i in range(len(seg_classes)) if seg_counts[i] > 3] ) seg_classes", ": bbox[5]] = np.where( Rp != 0, Rp, R[bbox[0] : bbox[3], bbox[1] :", "= 0 R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where(", "components labels = measure.label(binary, connectivity=1) seg_classes, seg_counts = np.unique(labels, return_counts=True) seg_classes = np.array(", "np.max(R) R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where( binary_bbox", "bbox[2] : bbox[5]] = np.where( Rp != 0, Rp, R[bbox[0] : bbox[3], bbox[1]", ": bbox[5]], ) continue # Get the foreground, bg etc. 
# Determine wether", "for cl in seg_classes: if verbose: print(\"Class\", cl) # Crop the images binary_cl", "= [] xc = np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1] yc = np.linspace(0, R.shape[0],", "Sb.squeeze() R = segment_nuclei(Sb, S, M, max_iters=max_iters, verbose=verbose) atoms, means = centroids(S, R)", "Label the connected components labels = measure.label(binary, connectivity=1) seg_classes, seg_counts = np.unique(labels, return_counts=True)", "0 max_class = np.max(R) wss = wss + max_class # sub region with", "0] if verbose: print(\"\\nIteration\", it) print(\"Classes\", seg_classes) print(\"Counts\", seg_counts) plot_points_3d(labels) for cl in", "to segment again on the basis of convexity and object counts if (", "if convexity >= min_convexity: max_class = np.max(R) R[bbox[0] : bbox[3], bbox[1] : bbox[4],", "seg_counts = np.unique(labels, return_counts=True) seg_classes = np.array( [seg_classes[i] for i in range(len(seg_classes)) if", "bbox[2] : bbox[5]] ) else: R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] :", "np.unique(R, return_counts=True)) return R def majority_vote(seg_img, R, cl): \"\"\" Majority vote of class", "cmask = R == cl smask = seg_img_coords[cmask] specie = majority_vote(seg_img, R, cl)", "binary_label_map = np.where(R == cl, seg_img, 0).astype(int) if np.count_nonzero(binary_label_map) == 0: return 0", "== max_class] = 0 R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]]", "bbox[1] : bbox[4], bbox[2] : bbox[5]] ) else: R[bbox[0] : bbox[3], bbox[1] :", "wss, R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]], ) if verbose:", "in voxel coordinates\"\"\" M = M.squeeze() S = S.squeeze() Sb = Sb.squeeze() R", "S.squeeze() Sb = Sb.squeeze() R = segment_nuclei(Sb, S, M, max_iters=max_iters, verbose=verbose) atoms, means", "bg = get_background(binary_bbox) unknown = bg - fg if verbose: print(\"Segmenting\") plot_points_3d(fg) #", "MIT ## Copyright: Copyright <NAME> & <NAME> 2020, ICSG3D 
------------------------------------------------- \"\"\" from itertools", "<filename>watershed.py \"\"\" ## Functions for computing watershed segmentation -------------------------------------------------- ## Author: <NAME>. ##", "bbox[4], bbox[2] : bbox[5]] = np.where( binary_bbox == cl, max_class + 1, R[bbox[0]", "# WS wss = segmentation.watershed(binary_bbox, markers) wss[wss == 1] = 0 max_class =", "= np.linspace(0, R.shape[0], R.shape[0] + 1)[:-1] coords = np.array(list(product(xc, yc, zc))).reshape(32, 32, 32,", "3) seg_img_coords = np.concatenate([seg_img.reshape(32, 32, 32, 1), coords], axis=-1) for cl in classes:", "+ 1, R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]], ) continue", "np.concatenate([seg_img.reshape(32, 32, 32, 1), coords], axis=-1) for cl in classes: cmask = R", "Determine wether or not to segment again on the basis of convexity and", "connected components labels = measure.label(binary, connectivity=1) seg_classes, seg_counts = np.unique(labels, return_counts=True) seg_classes =", "if seg_counts[i] > 3] ) seg_classes = seg_classes[seg_classes != 0] if verbose: print(\"\\nIteration\",", "erode fg = get_foreground(binary_bbox) bg = get_background(binary_bbox) unknown = bg - fg if", "= wss + max_class # sub region with classes relabelled wss[wss == max_class]", "0: means.append(np.mean(smask[:, 1:], axis=0)) atoms.append(specie) return atoms, means def watershed_clustering(M, S, Sb, max_iters=5,", "print(it, np.unique(R, return_counts=True)) return R def majority_vote(seg_img, R, cl): \"\"\" Majority vote of", "== cl, max_class + 1, R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] :", "from itertools import product import matplotlib.pyplot as plt import numpy as np from", "def segment_nuclei( binary, species, intensity, wmin=8, it=1, max_iters=5, min_convexity=0.8, verbose=False, ): \"\"\" Computes", "binary_bbox = crop(binary_cl, bbox) intensity_bbox = crop(intensity_cl, bbox) species_bbox = crop(species_cl, bbox) chull", ": 
bbox[5]] = np.where( wss != 0, wss, R[bbox[0] : bbox[3], bbox[1] :", "Sb = Sb.squeeze() R = segment_nuclei(Sb, S, M, max_iters=max_iters, verbose=verbose) atoms, means =", "import product import matplotlib.pyplot as plt import numpy as np from scipy.ndimage.morphology import", "= sorted(list(zip(unique, counts)), key=lambda x: x[1]) unique_counts = [i for i in unique_counts", "verbose=False): \"\"\"Determine centroids and species of atoms in the density/species matrices Returns the", "if np.count_nonzero(binary_label_map) == 0: return 0 unique, counts = np.unique(binary_label_map, return_counts=True) unique_counts =", "M.squeeze() S = S.squeeze() Sb = Sb.squeeze() R = segment_nuclei(Sb, S, M, max_iters=max_iters,", "wmin), nclasses) # Determine wether or not to segment again on the basis", "= measure.label(binary, connectivity=1) seg_classes, seg_counts = np.unique(labels, return_counts=True) seg_classes = np.array( [seg_classes[i] for", "of convexity and object counts if ( int(np.count_nonzero(wss) / wmin) > len(np.unique(wss)) -", "return_counts=True)) return R def majority_vote(seg_img, R, cl): \"\"\" Majority vote of class cl", "kernel = morphology.ball(kernel_size) return morphology.erosion(S, kernel) def crop(a, bbox): return a[bbox[0] : bbox[3],", "= np.array(list(product(xc, yc, zc))).reshape(32, 32, 32, 3) seg_img_coords = np.concatenate([seg_img.reshape(32, 32, 32, 1),", "R[bbox[0] : bbox[3], bbox[1] : bbox[4], bbox[2] : bbox[5]] = np.where( Rp !=", "- fg if verbose: print(\"Segmenting\") plot_points_3d(fg) # Markers for ws markers = measure.label(fg)", "Copyright: Copyright <NAME> & <NAME> 2020, ICSG3D ------------------------------------------------- \"\"\" from itertools import product", "np.where(labels == cl, species, 0) region = measure.regionprops(binary_cl, intensity_cl) bbox = region[0].bbox binary_bbox", "0 # WS wss = segmentation.watershed(binary_bbox, markers) wss[wss == 1] = 0 max_class", "scipy.ndimage.morphology import 
distance_transform_edt from skimage import filters, measure, morphology, segmentation from viz import", "classes relabelled wss[wss == max_class] = 0 nclasses = len(np.unique(wss)) - 1 if", "bbox[5]] def segment_nuclei( binary, species, intensity, wmin=8, it=1, max_iters=5, min_convexity=0.8, verbose=False, ): \"\"\"", ": bbox[4], bbox[2] : bbox[5]] ) else: R[bbox[0] : bbox[3], bbox[1] : bbox[4],", "segment_nuclei( binary, species, intensity, wmin=8, it=1, max_iters=5, min_convexity=0.8, verbose=False, ): \"\"\" Computes segmented", "a region R in segmented image\"\"\" binary_label_map = np.where(R == cl, seg_img, 0).astype(int)", ": bbox[4], bbox[2] : bbox[5]], ) if verbose: print(it, np.unique(R, return_counts=True)) return R", "max_class # sub region with classes relabelled Rp[Rp == max_class] = 0 R[bbox[0]", "cl, seg_img, 0).astype(int) if np.count_nonzero(binary_label_map) == 0: return 0 unique, counts = np.unique(binary_label_map,", "centroids and species of atoms in the density/species matrices Returns the atom z" ]
[ "pass class MatrixDimensionError(Exception): pass class MatrixNotSquare(Exception): pass class InvalidVectorError(Exception): pass class VectorDimensionError(Exception): pass", "class InvalidMatrixError(Exception): pass class MatrixDimensionError(Exception): pass class MatrixNotSquare(Exception): pass class InvalidVectorError(Exception): pass class", "InvalidMatrixError(Exception): pass class MatrixDimensionError(Exception): pass class MatrixNotSquare(Exception): pass class InvalidVectorError(Exception): pass class VectorDimensionError(Exception):" ]
[ "blank image lower_blank = blank_img.get_pixel(x, blank_img.height - 1 - y) # lower part", "img.height * 2) # generating a blank image of double height for x", "the original image :return: flip-vertical image \"\"\" img = SimpleImage(filename) blank_img = SimpleImage.blank(img.width,", "= every_color_of_pixel.blue lower_blank.red = every_color_of_pixel.red lower_blank.green = every_color_of_pixel.green lower_blank.blue = every_color_of_pixel.blue return blank_img", "= img.get_pixel(x, y) upper_blank = blank_img.get_pixel(x, y) # upper part of blank image", "every_color_of_pixel.red upper_blank.green = every_color_of_pixel.green upper_blank.blue = every_color_of_pixel.blue lower_blank.red = every_color_of_pixel.red lower_blank.green = every_color_of_pixel.green", "\"\"\" original_mt = SimpleImage('images/mt-rainier.jpg') original_mt.show() reflected = reflect('images/mt-rainier.jpg') reflected.show() if __name__ == '__main__':", "original_mt = SimpleImage('images/mt-rainier.jpg') original_mt.show() reflected = reflect('images/mt-rainier.jpg') reflected.show() if __name__ == '__main__': main()", "generates a flip-vertical image. \"\"\" original_mt = SimpleImage('images/mt-rainier.jpg') original_mt.show() reflected = reflect('images/mt-rainier.jpg') reflected.show()", "str, the file directory of the original image :return: flip-vertical image \"\"\" img", "\"\"\" This program generates a flip-vertical image. \"\"\" original_mt = SimpleImage('images/mt-rainier.jpg') original_mt.show() reflected", "every_color_of_pixel.blue lower_blank.red = every_color_of_pixel.red lower_blank.green = every_color_of_pixel.green lower_blank.blue = every_color_of_pixel.blue return blank_img def", "from simpleimage import SimpleImage def reflect(filename): \"\"\" :param filename: str, the file directory", "by placing an inverse image of mt-rainier.jpg below the original one. \"\"\" from", "mt-rainier.jpg below the original one. 
\"\"\" from simpleimage import SimpleImage def reflect(filename): \"\"\"", "image \"\"\" img = SimpleImage(filename) blank_img = SimpleImage.blank(img.width, img.height * 2) # generating", "every_color_of_pixel.green lower_blank.blue = every_color_of_pixel.blue return blank_img def main(): \"\"\" This program generates a", "\"\"\" :param filename: str, the file directory of the original image :return: flip-vertical", "height for x in range(img.width): for y in range(img.height): every_color_of_pixel = img.get_pixel(x, y)", "program generates a flip-vertical image. \"\"\" original_mt = SimpleImage('images/mt-rainier.jpg') original_mt.show() reflected = reflect('images/mt-rainier.jpg')", "= every_color_of_pixel.red upper_blank.green = every_color_of_pixel.green upper_blank.blue = every_color_of_pixel.blue lower_blank.red = every_color_of_pixel.red lower_blank.green =", "in range(img.width): for y in range(img.height): every_color_of_pixel = img.get_pixel(x, y) upper_blank = blank_img.get_pixel(x,", "File: mirror_lake.py ---------------------------------- This file reads in mt-rainier.jpg and makes a new image", "= blank_img.get_pixel(x, y) # upper part of blank image lower_blank = blank_img.get_pixel(x, blank_img.height", "generating a blank image of double height for x in range(img.width): for y", "of mt-rainier.jpg below the original one. \"\"\" from simpleimage import SimpleImage def reflect(filename):", "a blank image of double height for x in range(img.width): for y in", "new image that creates a mirror lake vibe by placing an inverse image", "def reflect(filename): \"\"\" :param filename: str, the file directory of the original image", "return blank_img def main(): \"\"\" This program generates a flip-vertical image. 
\"\"\" original_mt", "lower_blank.green = every_color_of_pixel.green lower_blank.blue = every_color_of_pixel.blue return blank_img def main(): \"\"\" This program", "\"\"\" File: mirror_lake.py ---------------------------------- This file reads in mt-rainier.jpg and makes a new", "file directory of the original image :return: flip-vertical image \"\"\" img = SimpleImage(filename)", "SimpleImage def reflect(filename): \"\"\" :param filename: str, the file directory of the original", "image of double height for x in range(img.width): for y in range(img.height): every_color_of_pixel", "makes a new image that creates a mirror lake vibe by placing an", "mt-rainier.jpg and makes a new image that creates a mirror lake vibe by", "filename: str, the file directory of the original image :return: flip-vertical image \"\"\"", "original one. \"\"\" from simpleimage import SimpleImage def reflect(filename): \"\"\" :param filename: str,", "original image :return: flip-vertical image \"\"\" img = SimpleImage(filename) blank_img = SimpleImage.blank(img.width, img.height", "flip-vertical image \"\"\" img = SimpleImage(filename) blank_img = SimpleImage.blank(img.width, img.height * 2) #", "blank_img def main(): \"\"\" This program generates a flip-vertical image. \"\"\" original_mt =", "upper_blank.green = every_color_of_pixel.green upper_blank.blue = every_color_of_pixel.blue lower_blank.red = every_color_of_pixel.red lower_blank.green = every_color_of_pixel.green lower_blank.blue", "a flip-vertical image. 
\"\"\" original_mt = SimpleImage('images/mt-rainier.jpg') original_mt.show() reflected = reflect('images/mt-rainier.jpg') reflected.show() if", "\"\"\" from simpleimage import SimpleImage def reflect(filename): \"\"\" :param filename: str, the file", "in mt-rainier.jpg and makes a new image that creates a mirror lake vibe", "of the original image :return: flip-vertical image \"\"\" img = SimpleImage(filename) blank_img =", "reflect(filename): \"\"\" :param filename: str, the file directory of the original image :return:", "image lower_blank = blank_img.get_pixel(x, blank_img.height - 1 - y) # lower part of", "= every_color_of_pixel.green lower_blank.blue = every_color_of_pixel.blue return blank_img def main(): \"\"\" This program generates", "= SimpleImage.blank(img.width, img.height * 2) # generating a blank image of double height", "for x in range(img.width): for y in range(img.height): every_color_of_pixel = img.get_pixel(x, y) upper_blank", "# upper part of blank image lower_blank = blank_img.get_pixel(x, blank_img.height - 1 -", "every_color_of_pixel.green upper_blank.blue = every_color_of_pixel.blue lower_blank.red = every_color_of_pixel.red lower_blank.green = every_color_of_pixel.green lower_blank.blue = every_color_of_pixel.blue", "part of blank image lower_blank = blank_img.get_pixel(x, blank_img.height - 1 - y) #", "lower_blank = blank_img.get_pixel(x, blank_img.height - 1 - y) # lower part of blank_image", "vibe by placing an inverse image of mt-rainier.jpg below the original one. 
\"\"\"", "the file directory of the original image :return: flip-vertical image \"\"\" img =", "y) # upper part of blank image lower_blank = blank_img.get_pixel(x, blank_img.height - 1", "SimpleImage(filename) blank_img = SimpleImage.blank(img.width, img.height * 2) # generating a blank image of", "of blank image lower_blank = blank_img.get_pixel(x, blank_img.height - 1 - y) # lower", "blank_image upper_blank.red = every_color_of_pixel.red upper_blank.green = every_color_of_pixel.green upper_blank.blue = every_color_of_pixel.blue lower_blank.red = every_color_of_pixel.red", "This file reads in mt-rainier.jpg and makes a new image that creates a", "file reads in mt-rainier.jpg and makes a new image that creates a mirror", "for y in range(img.height): every_color_of_pixel = img.get_pixel(x, y) upper_blank = blank_img.get_pixel(x, y) #", ":param filename: str, the file directory of the original image :return: flip-vertical image", "mirror_lake.py ---------------------------------- This file reads in mt-rainier.jpg and makes a new image that", "upper_blank.red = every_color_of_pixel.red upper_blank.green = every_color_of_pixel.green upper_blank.blue = every_color_of_pixel.blue lower_blank.red = every_color_of_pixel.red lower_blank.green", "1 - y) # lower part of blank_image upper_blank.red = every_color_of_pixel.red upper_blank.green =", "in range(img.height): every_color_of_pixel = img.get_pixel(x, y) upper_blank = blank_img.get_pixel(x, y) # upper part", "blank_img = SimpleImage.blank(img.width, img.height * 2) # generating a blank image of double", "= every_color_of_pixel.red lower_blank.green = every_color_of_pixel.green lower_blank.blue = every_color_of_pixel.blue return blank_img def main(): \"\"\"", "main(): \"\"\" This program generates a flip-vertical image. \"\"\" original_mt = SimpleImage('images/mt-rainier.jpg') original_mt.show()", "image of mt-rainier.jpg below the original one. 
\"\"\" from simpleimage import SimpleImage def", "an inverse image of mt-rainier.jpg below the original one. \"\"\" from simpleimage import", "lower_blank.blue = every_color_of_pixel.blue return blank_img def main(): \"\"\" This program generates a flip-vertical", "blank_img.height - 1 - y) # lower part of blank_image upper_blank.red = every_color_of_pixel.red", "y) # lower part of blank_image upper_blank.red = every_color_of_pixel.red upper_blank.green = every_color_of_pixel.green upper_blank.blue", "image :return: flip-vertical image \"\"\" img = SimpleImage(filename) blank_img = SimpleImage.blank(img.width, img.height *", "image. \"\"\" original_mt = SimpleImage('images/mt-rainier.jpg') original_mt.show() reflected = reflect('images/mt-rainier.jpg') reflected.show() if __name__ ==", "2) # generating a blank image of double height for x in range(img.width):", "creates a mirror lake vibe by placing an inverse image of mt-rainier.jpg below", "upper part of blank image lower_blank = blank_img.get_pixel(x, blank_img.height - 1 - y)", "= SimpleImage(filename) blank_img = SimpleImage.blank(img.width, img.height * 2) # generating a blank image", "every_color_of_pixel.blue return blank_img def main(): \"\"\" This program generates a flip-vertical image. \"\"\"", "= every_color_of_pixel.blue return blank_img def main(): \"\"\" This program generates a flip-vertical image.", "that creates a mirror lake vibe by placing an inverse image of mt-rainier.jpg", "a mirror lake vibe by placing an inverse image of mt-rainier.jpg below the", "range(img.height): every_color_of_pixel = img.get_pixel(x, y) upper_blank = blank_img.get_pixel(x, y) # upper part of", "of blank_image upper_blank.red = every_color_of_pixel.red upper_blank.green = every_color_of_pixel.green upper_blank.blue = every_color_of_pixel.blue lower_blank.red =", "below the original one. 
\"\"\" from simpleimage import SimpleImage def reflect(filename): \"\"\" :param", "\"\"\" img = SimpleImage(filename) blank_img = SimpleImage.blank(img.width, img.height * 2) # generating a", "of double height for x in range(img.width): for y in range(img.height): every_color_of_pixel =", "lower part of blank_image upper_blank.red = every_color_of_pixel.red upper_blank.green = every_color_of_pixel.green upper_blank.blue = every_color_of_pixel.blue", "lower_blank.red = every_color_of_pixel.red lower_blank.green = every_color_of_pixel.green lower_blank.blue = every_color_of_pixel.blue return blank_img def main():", "# generating a blank image of double height for x in range(img.width): for", "the original one. \"\"\" from simpleimage import SimpleImage def reflect(filename): \"\"\" :param filename:", "= blank_img.get_pixel(x, blank_img.height - 1 - y) # lower part of blank_image upper_blank.red", "blank_img.get_pixel(x, blank_img.height - 1 - y) # lower part of blank_image upper_blank.red =", "x in range(img.width): for y in range(img.height): every_color_of_pixel = img.get_pixel(x, y) upper_blank =", "image that creates a mirror lake vibe by placing an inverse image of", "* 2) # generating a blank image of double height for x in", "mirror lake vibe by placing an inverse image of mt-rainier.jpg below the original", "blank_img.get_pixel(x, y) # upper part of blank image lower_blank = blank_img.get_pixel(x, blank_img.height -", "part of blank_image upper_blank.red = every_color_of_pixel.red upper_blank.green = every_color_of_pixel.green upper_blank.blue = every_color_of_pixel.blue lower_blank.red", "double height for x in range(img.width): for y in range(img.height): every_color_of_pixel = img.get_pixel(x,", "every_color_of_pixel = img.get_pixel(x, y) upper_blank = blank_img.get_pixel(x, y) # upper part of blank", "every_color_of_pixel.red lower_blank.green = every_color_of_pixel.green lower_blank.blue = every_color_of_pixel.blue return blank_img def main(): \"\"\" 
This", "and makes a new image that creates a mirror lake vibe by placing", "img = SimpleImage(filename) blank_img = SimpleImage.blank(img.width, img.height * 2) # generating a blank", "This program generates a flip-vertical image. \"\"\" original_mt = SimpleImage('images/mt-rainier.jpg') original_mt.show() reflected =", "- y) # lower part of blank_image upper_blank.red = every_color_of_pixel.red upper_blank.green = every_color_of_pixel.green", "directory of the original image :return: flip-vertical image \"\"\" img = SimpleImage(filename) blank_img", "lake vibe by placing an inverse image of mt-rainier.jpg below the original one.", "a new image that creates a mirror lake vibe by placing an inverse", "one. \"\"\" from simpleimage import SimpleImage def reflect(filename): \"\"\" :param filename: str, the", "- 1 - y) # lower part of blank_image upper_blank.red = every_color_of_pixel.red upper_blank.green", "img.get_pixel(x, y) upper_blank = blank_img.get_pixel(x, y) # upper part of blank image lower_blank", "y in range(img.height): every_color_of_pixel = img.get_pixel(x, y) upper_blank = blank_img.get_pixel(x, y) # upper", "y) upper_blank = blank_img.get_pixel(x, y) # upper part of blank image lower_blank =", "range(img.width): for y in range(img.height): every_color_of_pixel = img.get_pixel(x, y) upper_blank = blank_img.get_pixel(x, y)", "upper_blank = blank_img.get_pixel(x, y) # upper part of blank image lower_blank = blank_img.get_pixel(x,", "flip-vertical image. \"\"\" original_mt = SimpleImage('images/mt-rainier.jpg') original_mt.show() reflected = reflect('images/mt-rainier.jpg') reflected.show() if __name__", "placing an inverse image of mt-rainier.jpg below the original one. 
\"\"\" from simpleimage", ":return: flip-vertical image \"\"\" img = SimpleImage(filename) blank_img = SimpleImage.blank(img.width, img.height * 2)", "simpleimage import SimpleImage def reflect(filename): \"\"\" :param filename: str, the file directory of", "def main(): \"\"\" This program generates a flip-vertical image. \"\"\" original_mt = SimpleImage('images/mt-rainier.jpg')", "---------------------------------- This file reads in mt-rainier.jpg and makes a new image that creates", "inverse image of mt-rainier.jpg below the original one. \"\"\" from simpleimage import SimpleImage", "SimpleImage.blank(img.width, img.height * 2) # generating a blank image of double height for", "# lower part of blank_image upper_blank.red = every_color_of_pixel.red upper_blank.green = every_color_of_pixel.green upper_blank.blue =", "reads in mt-rainier.jpg and makes a new image that creates a mirror lake", "= every_color_of_pixel.green upper_blank.blue = every_color_of_pixel.blue lower_blank.red = every_color_of_pixel.red lower_blank.green = every_color_of_pixel.green lower_blank.blue =", "import SimpleImage def reflect(filename): \"\"\" :param filename: str, the file directory of the", "upper_blank.blue = every_color_of_pixel.blue lower_blank.red = every_color_of_pixel.red lower_blank.green = every_color_of_pixel.green lower_blank.blue = every_color_of_pixel.blue return", "blank image of double height for x in range(img.width): for y in range(img.height):" ]
[ "data X_df = X.transpose() X_df.to_csv(proj_path('data/CNAE-9-wide.csv')) y_df = pd.DataFrame(y) y_df.to_csv(proj_path('data/CNAE-9-labels.csv')) from sklearn import tree", "names=['category'] + [\"w_%s\" % i for i in range(0, 856)]) y = data.category", "= pd.DataFrame(dict(importance=clf.feature_importances_), index=X.columns) pred_df = pd.DataFrame(dict(predicted=pred)) tree_df = pd.DataFrame(dict(impurity=clf.tree_.impurity, feature=clf.tree_.feature, threshold=clf.tree_.threshold)) var_df.to_csv(proj_path('src/test/data/CNAE-9-importance.csv')) pred_df.to_csv(proj_path('src/test/data/CNAE-9-predicted.csv'))", "+ [\"w_%s\" % i for i in range(0, 856)]) y = data.category X", "Generate files for decision tree integration test ''' BASEDIR = os.path.abspath(os.path.join(os.path.basename(__file__), '../../../..')) def", "test verification var_df = pd.DataFrame(dict(importance=clf.feature_importances_), index=X.columns) pred_df = pd.DataFrame(dict(predicted=pred)) tree_df = pd.DataFrame(dict(impurity=clf.tree_.impurity, feature=clf.tree_.feature,", "files for decision tree integration test ''' BASEDIR = os.path.abspath(os.path.join(os.path.basename(__file__), '../../../..')) def proj_path(path):", "X_df.to_csv(proj_path('data/CNAE-9-wide.csv')) y_df = pd.DataFrame(y) y_df.to_csv(proj_path('data/CNAE-9-labels.csv')) from sklearn import tree clf = tree.DecisionTreeClassifier(random_state=6) clf.fit(X,", "proj_path(path): return os.path.join(BASEDIR, path) data = pd.read_csv(proj_path('data/CNAE-9.csv'), names=['category'] + [\"w_%s\" % i for", "y = data.category X = data[[\"w_%s\" % i for i in range(0, 856)]]", "= X.transpose() X_df.to_csv(proj_path('data/CNAE-9-wide.csv')) y_df = pd.DataFrame(y) y_df.to_csv(proj_path('data/CNAE-9-labels.csv')) from sklearn import tree clf =", "data = pd.read_csv(proj_path('data/CNAE-9.csv'), names=['category'] + [\"w_%s\" % i for i in range(0, 856)])", "# Save the data for test verification var_df = 
pd.DataFrame(dict(importance=clf.feature_importances_), index=X.columns) pred_df =", "from sklearn import tree clf = tree.DecisionTreeClassifier(random_state=6) clf.fit(X, y) pred = clf.predict(X) print", "i in range(0, 856)]) y = data.category X = data[[\"w_%s\" % i for", "decision tree integration test ''' BASEDIR = os.path.abspath(os.path.join(os.path.basename(__file__), '../../../..')) def proj_path(path): return os.path.join(BASEDIR,", "var_df = pd.DataFrame(dict(importance=clf.feature_importances_), index=X.columns) pred_df = pd.DataFrame(dict(predicted=pred)) tree_df = pd.DataFrame(dict(impurity=clf.tree_.impurity, feature=clf.tree_.feature, threshold=clf.tree_.threshold)) var_df.to_csv(proj_path('src/test/data/CNAE-9-importance.csv'))", "% i for i in range(0, 856)]] # Save output data X_df =", "tree.DecisionTreeClassifier(random_state=6) clf.fit(X, y) pred = clf.predict(X) print clf print \"Impurity len: %s\" %", "y_df.to_csv(proj_path('data/CNAE-9-labels.csv')) from sklearn import tree clf = tree.DecisionTreeClassifier(random_state=6) clf.fit(X, y) pred = clf.predict(X)", "X = data[[\"w_%s\" % i for i in range(0, 856)]] # Save output", "for i in range(0, 856)]) y = data.category X = data[[\"w_%s\" % i", "for i in range(0, 856)]] # Save output data X_df = X.transpose() X_df.to_csv(proj_path('data/CNAE-9-wide.csv'))", "clf = tree.DecisionTreeClassifier(random_state=6) clf.fit(X, y) pred = clf.predict(X) print clf print \"Impurity len:", "% i for i in range(0, 856)]) y = data.category X = data[[\"w_%s\"", "i for i in range(0, 856)]] # Save output data X_df = X.transpose()", "% len(clf.tree_.impurity) # Save the data for test verification var_df = pd.DataFrame(dict(importance=clf.feature_importances_), index=X.columns)", "''' BASEDIR = os.path.abspath(os.path.join(os.path.basename(__file__), '../../../..')) def proj_path(path): return os.path.join(BASEDIR, path) data = pd.read_csv(proj_path('data/CNAE-9.csv'),", "856)]] # Save output data X_df = X.transpose() 
X_df.to_csv(proj_path('data/CNAE-9-wide.csv')) y_df = pd.DataFrame(y) y_df.to_csv(proj_path('data/CNAE-9-labels.csv'))", "y) pred = clf.predict(X) print clf print \"Impurity len: %s\" % len(clf.tree_.impurity) #", "integration test ''' BASEDIR = os.path.abspath(os.path.join(os.path.basename(__file__), '../../../..')) def proj_path(path): return os.path.join(BASEDIR, path) data", "pd ''' Generate files for decision tree integration test ''' BASEDIR = os.path.abspath(os.path.join(os.path.basename(__file__),", "= data[[\"w_%s\" % i for i in range(0, 856)]] # Save output data", "pandas as pd ''' Generate files for decision tree integration test ''' BASEDIR", "pd.DataFrame(dict(importance=clf.feature_importances_), index=X.columns) pred_df = pd.DataFrame(dict(predicted=pred)) tree_df = pd.DataFrame(dict(impurity=clf.tree_.impurity, feature=clf.tree_.feature, threshold=clf.tree_.threshold)) var_df.to_csv(proj_path('src/test/data/CNAE-9-importance.csv')) pred_df.to_csv(proj_path('src/test/data/CNAE-9-predicted.csv')) tree_df.to_csv(proj_path('src/test/data/CNAE-9-tree.csv'))", "clf print \"Impurity len: %s\" % len(clf.tree_.impurity) # Save the data for test", "test ''' BASEDIR = os.path.abspath(os.path.join(os.path.basename(__file__), '../../../..')) def proj_path(path): return os.path.join(BASEDIR, path) data =", "clf.fit(X, y) pred = clf.predict(X) print clf print \"Impurity len: %s\" % len(clf.tree_.impurity)", "range(0, 856)]] # Save output data X_df = X.transpose() X_df.to_csv(proj_path('data/CNAE-9-wide.csv')) y_df = pd.DataFrame(y)", "data for test verification var_df = pd.DataFrame(dict(importance=clf.feature_importances_), index=X.columns) pred_df = pd.DataFrame(dict(predicted=pred)) tree_df =", "856)]) y = data.category X = data[[\"w_%s\" % i for i in range(0,", "[\"w_%s\" % i for i in range(0, 856)]) y = data.category X =", "i for i in range(0, 856)]) y = data.category X = data[[\"w_%s\" %", "= data.category X = data[[\"w_%s\" % i for i in range(0, 856)]] #", 
"os.path.join(BASEDIR, path) data = pd.read_csv(proj_path('data/CNAE-9.csv'), names=['category'] + [\"w_%s\" % i for i in", "range(0, 856)]) y = data.category X = data[[\"w_%s\" % i for i in", "= os.path.abspath(os.path.join(os.path.basename(__file__), '../../../..')) def proj_path(path): return os.path.join(BASEDIR, path) data = pd.read_csv(proj_path('data/CNAE-9.csv'), names=['category'] +", "data.category X = data[[\"w_%s\" % i for i in range(0, 856)]] # Save", "print clf print \"Impurity len: %s\" % len(clf.tree_.impurity) # Save the data for", "pd.read_csv(proj_path('data/CNAE-9.csv'), names=['category'] + [\"w_%s\" % i for i in range(0, 856)]) y =", "verification var_df = pd.DataFrame(dict(importance=clf.feature_importances_), index=X.columns) pred_df = pd.DataFrame(dict(predicted=pred)) tree_df = pd.DataFrame(dict(impurity=clf.tree_.impurity, feature=clf.tree_.feature, threshold=clf.tree_.threshold))", "as pd ''' Generate files for decision tree integration test ''' BASEDIR =", "X_df = X.transpose() X_df.to_csv(proj_path('data/CNAE-9-wide.csv')) y_df = pd.DataFrame(y) y_df.to_csv(proj_path('data/CNAE-9-labels.csv')) from sklearn import tree clf", "data[[\"w_%s\" % i for i in range(0, 856)]] # Save output data X_df", "clf.predict(X) print clf print \"Impurity len: %s\" % len(clf.tree_.impurity) # Save the data", "for test verification var_df = pd.DataFrame(dict(importance=clf.feature_importances_), index=X.columns) pred_df = pd.DataFrame(dict(predicted=pred)) tree_df = pd.DataFrame(dict(impurity=clf.tree_.impurity,", "'../../../..')) def proj_path(path): return os.path.join(BASEDIR, path) data = pd.read_csv(proj_path('data/CNAE-9.csv'), names=['category'] + [\"w_%s\" %", "import tree clf = tree.DecisionTreeClassifier(random_state=6) clf.fit(X, y) pred = clf.predict(X) print clf print", "def proj_path(path): return os.path.join(BASEDIR, path) data = pd.read_csv(proj_path('data/CNAE-9.csv'), names=['category'] + [\"w_%s\" % i", "len: %s\" % 
len(clf.tree_.impurity) # Save the data for test verification var_df =", "%s\" % len(clf.tree_.impurity) # Save the data for test verification var_df = pd.DataFrame(dict(importance=clf.feature_importances_),", "Save output data X_df = X.transpose() X_df.to_csv(proj_path('data/CNAE-9-wide.csv')) y_df = pd.DataFrame(y) y_df.to_csv(proj_path('data/CNAE-9-labels.csv')) from sklearn", "os import pandas as pd ''' Generate files for decision tree integration test", "print \"Impurity len: %s\" % len(clf.tree_.impurity) # Save the data for test verification", "# Save output data X_df = X.transpose() X_df.to_csv(proj_path('data/CNAE-9-wide.csv')) y_df = pd.DataFrame(y) y_df.to_csv(proj_path('data/CNAE-9-labels.csv')) from", "len(clf.tree_.impurity) # Save the data for test verification var_df = pd.DataFrame(dict(importance=clf.feature_importances_), index=X.columns) pred_df", "= tree.DecisionTreeClassifier(random_state=6) clf.fit(X, y) pred = clf.predict(X) print clf print \"Impurity len: %s\"", "BASEDIR = os.path.abspath(os.path.join(os.path.basename(__file__), '../../../..')) def proj_path(path): return os.path.join(BASEDIR, path) data = pd.read_csv(proj_path('data/CNAE-9.csv'), names=['category']", "os.path.abspath(os.path.join(os.path.basename(__file__), '../../../..')) def proj_path(path): return os.path.join(BASEDIR, path) data = pd.read_csv(proj_path('data/CNAE-9.csv'), names=['category'] + [\"w_%s\"", "\"Impurity len: %s\" % len(clf.tree_.impurity) # Save the data for test verification var_df", "pd.DataFrame(y) y_df.to_csv(proj_path('data/CNAE-9-labels.csv')) from sklearn import tree clf = tree.DecisionTreeClassifier(random_state=6) clf.fit(X, y) pred =", "X.transpose() X_df.to_csv(proj_path('data/CNAE-9-wide.csv')) y_df = pd.DataFrame(y) y_df.to_csv(proj_path('data/CNAE-9-labels.csv')) from sklearn import tree clf = tree.DecisionTreeClassifier(random_state=6)", "in range(0, 856)]] # Save output data X_df = X.transpose() X_df.to_csv(proj_path('data/CNAE-9-wide.csv')) 
y_df =", "sklearn import tree clf = tree.DecisionTreeClassifier(random_state=6) clf.fit(X, y) pred = clf.predict(X) print clf", "Save the data for test verification var_df = pd.DataFrame(dict(importance=clf.feature_importances_), index=X.columns) pred_df = pd.DataFrame(dict(predicted=pred))", "tree clf = tree.DecisionTreeClassifier(random_state=6) clf.fit(X, y) pred = clf.predict(X) print clf print \"Impurity", "in range(0, 856)]) y = data.category X = data[[\"w_%s\" % i for i", "pred = clf.predict(X) print clf print \"Impurity len: %s\" % len(clf.tree_.impurity) # Save", "for decision tree integration test ''' BASEDIR = os.path.abspath(os.path.join(os.path.basename(__file__), '../../../..')) def proj_path(path): return", "= clf.predict(X) print clf print \"Impurity len: %s\" % len(clf.tree_.impurity) # Save the", "''' Generate files for decision tree integration test ''' BASEDIR = os.path.abspath(os.path.join(os.path.basename(__file__), '../../../..'))", "i in range(0, 856)]] # Save output data X_df = X.transpose() X_df.to_csv(proj_path('data/CNAE-9-wide.csv')) y_df", "output data X_df = X.transpose() X_df.to_csv(proj_path('data/CNAE-9-wide.csv')) y_df = pd.DataFrame(y) y_df.to_csv(proj_path('data/CNAE-9-labels.csv')) from sklearn import", "path) data = pd.read_csv(proj_path('data/CNAE-9.csv'), names=['category'] + [\"w_%s\" % i for i in range(0,", "import os import pandas as pd ''' Generate files for decision tree integration", "the data for test verification var_df = pd.DataFrame(dict(importance=clf.feature_importances_), index=X.columns) pred_df = pd.DataFrame(dict(predicted=pred)) tree_df", "y_df = pd.DataFrame(y) y_df.to_csv(proj_path('data/CNAE-9-labels.csv')) from sklearn import tree clf = tree.DecisionTreeClassifier(random_state=6) clf.fit(X, y)", "tree integration test ''' BASEDIR = os.path.abspath(os.path.join(os.path.basename(__file__), '../../../..')) def proj_path(path): return os.path.join(BASEDIR, path)", "= pd.DataFrame(y) 
y_df.to_csv(proj_path('data/CNAE-9-labels.csv')) from sklearn import tree clf = tree.DecisionTreeClassifier(random_state=6) clf.fit(X, y) pred", "return os.path.join(BASEDIR, path) data = pd.read_csv(proj_path('data/CNAE-9.csv'), names=['category'] + [\"w_%s\" % i for i", "import pandas as pd ''' Generate files for decision tree integration test '''", "= pd.read_csv(proj_path('data/CNAE-9.csv'), names=['category'] + [\"w_%s\" % i for i in range(0, 856)]) y" ]
[ "Women Age Chart for Impression\"+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage and (number of", "Impressions\"+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage and (number of people)') ax.legend(labels=labels_Ages, title=\"Age Groups\",", "def plot_country(dataframe): date_impression = dataframe.iloc[:,0:1] sub_impression = dataframe.iloc[:,1:] top10_impression = sub_impression.mean().nlargest(10) fig, ax", "chart was created!\") plt.clf() fig, ax = plt.subplots(figsize=(12, 12)) top10_impression.plot(kind='bar',use_index=True, grid=True,fontsize=8,rot=10,) plt.ylabel('People that", "import datetime as dt import matplotlib.pyplot as plt import pandas as pd import", "people)') ax.legend(labels=labels_Ages, title=\"Age Groups\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Men-Age-Content\"+\".png\",dpi=300) print(\"Men :", "men_age=dataframe.iloc[:,8:15].mean() fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect=\"equal\")) women_age.plot.pie(labels=['F.13-17','F.18-24','F.25-34','F.35-44','F.45-54','F.55-64','F.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,women_age.values), textprops=dict(color=\"w\")) ax.set_title(\"Average", ": Age-Impression chart was created!\") plt.clf() fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect=\"equal\")) men_age.plot.pie(labels=['M.13-17','M.18-24','M.25-34','M.35-44','M.45-54','M.55-64','M.65+'],", "plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect=\"equal\")) men_age.plot.pie(labels=['M.13-17','M.18-24','M.25-34','M.35-44','M.45-54','M.55-64','M.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,men_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Men Age Chart for", "6), 
subplot_kw=dict(aspect=\"equal\")) men_age.plot.pie(labels=['M.13-17','M.18-24','M.25-34','M.35-44','M.45-54','M.55-64','M.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,men_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Men Age Chart for Content", "ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect=\"equal\")) women_age.plot.pie(labels=['F.13-17','F.18-24','F.25-34','F.35-44','F.45-54','F.55-64','F.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,women_age.values), textprops=dict(color=\"w\")) ax.set_title(\"Average Women Age", "= sub_impression.mean().nlargest(10) fig, ax = plt.subplots(figsize=(12, 12)) top10_impression.plot(kind='bar',use_index=True,position=0.8, grid=True,fontsize=8,rot=6,) plt.ylabel('People\\'s Countries in which", "ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage and (number of people)') ax.legend(labels=labels_Ages, title=\"Age Groups\", loc=\"center left\", bbox_to_anchor=(1,", "pandas as pd import matplotlib.pyplot as plt import matplotlib.dates as dates def func(pct,", "Women Age Chart for Content Activity\"+date_content.iat[0,0].split(\"T\",1)[0]+\" until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage and (number", "autopct=lambda pct:func(pct,men_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Men Age Chart for Impressions\"+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average", "ax.set_title(\"Average Women Age Chart for Content Activity\"+date_content.iat[0,0].split(\"T\",1)[0]+\" until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage and", "Countries in which the Page was appeared on their screens',fontsize=15) plt.title('Average content activity.", "fontsize=8,subplots=True, autopct=lambda 
pct:func(pct,women_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Women Age Chart for Impression\"+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart')", "Age Chart for Impression\"+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage and (number of people)')", "are talking about the Page',fontsize=15) plt.title('Average content activity. Days: '+date_content.iat[0,0].split(\"T\",1)[0]+\" until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15) plt.savefig(\"charts/City-Content\"+\".png\",dpi=300)", "content activity. Days: '+date_content.iat[0,0].split(\"T\",1)[0]+\" until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15) plt.savefig(\"charts/City-Content\"+\".png\",dpi=300) print(\"City-Content Activity chart was created!\") plt.clf()", "their screens',fontsize=15) plt.title('Average content activity. 
Days: '+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) plt.savefig(\"charts/Country-Impression\"+\".png\",dpi=300) print(\"Country-Impression chart was", "of people)') ax.legend(labels=labels_Ages, title=\"Age Groups\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Women-Age-Content\"+\".png\",dpi=300) print(\"Women", "1)) plt.savefig(\"charts/Women-Age-Content\"+\".png\",dpi=300) print(\"Women : Age-Content chart was created!\") plt.clf() fig, ax = plt.subplots(figsize=(8,", "0, 0.5, 1)) plt.savefig(\"charts/Women-Age-Impression\"+\".png\",dpi=300) print(\"Women : Age-Impression chart was created!\") plt.clf() fig, ax", "created!\") plt.clf() def run_charts(): xlsxfile_age_content ='excels/Ages-Content.xlsx' age_content = pd.read_excel(xlsxfile_age_content) plot_ages_content(age_content) xlsxfile_age_impression = 'excels/Ages-Impressions.xlsx'", "load_workbook import numpy as np import datetime as dt import matplotlib.pyplot as plt", "sub_impression.mean().nlargest(10) fig, ax = plt.subplots(figsize=(12, 12)) top10_impression.plot(kind='bar',use_index=True,position=0.8, grid=True,fontsize=8,rot=6,) plt.ylabel('People\\'s Countries in which the", "print(\"Country-Impression chart was created!\") plt.clf() def run_charts(): xlsxfile_age_content ='excels/Ages-Content.xlsx' age_content = pd.read_excel(xlsxfile_age_content) plot_ages_content(age_content)", "plt import pandas as pd import matplotlib.pyplot as plt import matplotlib.dates as dates", "plt.ylabel('People that are talking about the Page',fontsize=15) plt.title('Average content activity. 
Days: '+date_content.iat[0,0].split(\"T\",1)[0]+\" until", ": Age-Content chart was created!\") plt.clf() fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect=\"equal\")) men_age.plot.pie(labels=['M.13-17','M.18-24','M.25-34','M.35-44','M.45-54','M.55-64','M.65+'],", "Days :'+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) plt.savefig(\"charts/City-Impressions\"+\".png\",dpi=300) print(\"City-Impression chart was created!\") plt.clf() #print(top10_content.values) def plot_country(dataframe):", "fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect=\"equal\")) men_age.plot.pie(labels=['M.13-17','M.18-24','M.25-34','M.35-44','M.45-54','M.55-64','M.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,men_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Men", "plot_ages_content(dataframe): #print(dataframe) # iloc[rows,cols] date_content=dataframe.iloc[:,0:1] women_age = dataframe.iloc[:,1:8].mean() men_age=dataframe.iloc[:,8:15].mean() fig, ax = plt.subplots(figsize=(8,", "men_age.plot.pie(labels=['M.13-17','M.18-24','M.25-34','M.35-44','M.45-54','M.55-64','M.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,men_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Men Age Chart for Content Activity\"+date_content.iat[0,0].split(\"T\",1)[0]+\" until", ":'+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) plt.savefig(\"charts/City-Impressions\"+\".png\",dpi=300) print(\"City-Impression chart was created!\") plt.clf() #print(top10_content.values) def plot_country(dataframe): date_impression", "dataframe1.iloc[:,1:] sub_impression = dataframe2.iloc[:,1:] top10_content = sub_content.mean().nlargest(10) top10_impression = sub_impression.mean().nlargest(10) objects_content= top10_content.axes fig,", 
"Impression\"+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage and (number of people)') ax.legend(labels=labels_Ages, title=\"Age Groups\",", "created!\") plt.clf() fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect=\"equal\")) men_age.plot.pie(labels=['M.13-17','M.18-24','M.25-34','M.35-44','M.45-54','M.55-64','M.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,men_age.values), textprops=dict(color=\"w\"),pctdistance=0.7)", "that Page was appeared on their screen',fontsize=15) plt.title('Average Impressions. Days :'+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15)", "matplotlib.pyplot as plt import pandas as pd import matplotlib.pyplot as plt import matplotlib.dates", "of people)') ax.legend(labels=labels_Ages, title=\"Age Groups\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Men-Age-Impression\"+\".png\",dpi=300) print(\"Men", "plt.savefig(\"charts/Women-Age-Impression\"+\".png\",dpi=300) print(\"Women : Age-Impression chart was created!\") plt.clf() fig, ax = plt.subplots(figsize=(8, 6),", "plt.clf() def plot_city(dataframe1,dataframe2): #print(dataframe1) #print(dataframe2) date_content=dataframe1.iloc[:,0:1] date_impression = dataframe1.iloc[:,0:1] #print(date_content.iat[-1,0]) sub_content= dataframe1.iloc[:,1:] sub_impression", "plt.subplots(figsize=(12, 12)) top10_impression.plot(kind='bar',use_index=True,position=0.8, grid=True,fontsize=8,rot=6,) plt.ylabel('People\\'s Countries in which the Page was appeared on", "ax.legend(labels=labels_Ages, title=\"Age Groups\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Men-Age-Impression\"+\".png\",dpi=300) print(\"Men : Age-Impression", "people)') ax.legend(labels=labels_Ages, title=\"Age Groups\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1)) 
plt.savefig(\"charts/Men-Age-Impression\"+\".png\",dpi=300) print(\"Men :", "sub_content.mean().nlargest(10) top10_impression = sub_impression.mean().nlargest(10) objects_content= top10_content.axes fig, ax = plt.subplots(figsize=(12, 12)) top10_content.plot(kind='bar',use_index=True,position=0.8, grid=True,fontsize=8,rot=6,)", "until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15) plt.savefig(\"charts/City-Content\"+\".png\",dpi=300) print(\"City-Content Activity chart was created!\") plt.clf() fig, ax = plt.subplots(figsize=(12,", "= dataframe.iloc[:,1:] top10_impression = sub_impression.mean().nlargest(10) fig, ax = plt.subplots(figsize=(12, 12)) top10_impression.plot(kind='bar',use_index=True,position=0.8, grid=True,fontsize=8,rot=6,) plt.ylabel('People\\'s", "xlsxfile_age_content ='excels/Ages-Content.xlsx' age_content = pd.read_excel(xlsxfile_age_content) plot_ages_content(age_content) xlsxfile_age_impression = 'excels/Ages-Impressions.xlsx' age_impression = pd.read_excel(xlsxfile_age_impression) plot_ages_impressions(age_impression)", "fontsize=8,subplots=True, autopct=lambda pct:func(pct,men_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Men Age Chart for Impressions\"+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart')", "people)') ax.legend(labels=labels_Ages, title=\"Age Groups\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Women-Age-Content\"+\".png\",dpi=300) print(\"Women :", "fontsize=8,subplots=True, autopct=lambda pct:func(pct,men_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Men Age Chart for Content Activity\"+date_content.iat[0,0].split(\"T\",1)[0]+\" until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15)", "top10_content = sub_content.mean().nlargest(10) top10_impression = sub_impression.mean().nlargest(10) objects_content= top10_content.axes fig, ax 
= plt.subplots(figsize=(12, 12))", "bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Men-Age-Impression\"+\".png\",dpi=300) print(\"Men : Age-Impression chart was created!\") plt.clf() def", "as pd import matplotlib.pyplot as plt import matplotlib.dates as dates def func(pct, allvals):", "= plt.subplots(figsize=(12, 12)) top10_impression.plot(kind='bar',use_index=True, grid=True,fontsize=8,rot=10,) plt.ylabel('People that Page was appeared on their screen',fontsize=15)", "def func(pct, allvals): absolute = int(pct/100.*np.sum(allvals)) return \"{:.1f}%\\n({:d} )\".format(pct, absolute) labels_Ages=['13-17','18-24','25-34','35-44','45-54','55-64','65+'] def plot_ages_content(dataframe):", "appeared on their screen',fontsize=15) plt.title('Average Impressions. Days :'+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) plt.savefig(\"charts/City-Impressions\"+\".png\",dpi=300) print(\"City-Impression chart", "= dataframe1.iloc[:,0:1] #print(date_content.iat[-1,0]) sub_content= dataframe1.iloc[:,1:] sub_impression = dataframe2.iloc[:,1:] top10_content = sub_content.mean().nlargest(10) top10_impression =", "plt import matplotlib.dates as dates def func(pct, allvals): absolute = int(pct/100.*np.sum(allvals)) return \"{:.1f}%\\n({:d}", "ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect=\"equal\")) women_age.plot.pie(labels=['F.13-17','F.18-24','F.25-34','F.35-44','F.45-54','F.55-64','F.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,women_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Women Age", "'excels/Ages-Impressions.xlsx' age_impression = pd.read_excel(xlsxfile_age_impression) plot_ages_impressions(age_impression) xlsxfile_city_content = 'excels/City-Content.xlsx' city_content = pd.read_excel(xlsxfile_city_content) xlsxfile_city_impression =", "= pd.read_excel(xlsxfile_age_content) plot_ages_content(age_content) xlsxfile_age_impression = 
'excels/Ages-Impressions.xlsx' age_impression = pd.read_excel(xlsxfile_age_impression) plot_ages_impressions(age_impression) xlsxfile_city_content = 'excels/City-Content.xlsx'", "women_age.plot.pie(labels=['F.13-17','F.18-24','F.25-34','F.35-44','F.45-54','F.55-64','F.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,women_age.values), textprops=dict(color=\"w\")) ax.set_title(\"Average Women Age Chart for Content Activity\"+date_content.iat[0,0].split(\"T\",1)[0]+\" until", "as dt import matplotlib.pyplot as plt import pandas as pd import matplotlib.pyplot as", "Age-Impression chart was created!\") plt.clf() def plot_city(dataframe1,dataframe2): #print(dataframe1) #print(dataframe2) date_content=dataframe1.iloc[:,0:1] date_impression = dataframe1.iloc[:,0:1]", "date_content=dataframe1.iloc[:,0:1] date_impression = dataframe1.iloc[:,0:1] #print(date_content.iat[-1,0]) sub_content= dataframe1.iloc[:,1:] sub_impression = dataframe2.iloc[:,1:] top10_content = sub_content.mean().nlargest(10)", "top10_content.plot(kind='bar',use_index=True,position=0.8, grid=True,fontsize=8,rot=6,) plt.ylabel('People that are talking about the Page',fontsize=15) plt.title('Average content activity. 
Days:", "openpyxl import load_workbook import numpy as np import datetime as dt import matplotlib.pyplot", "ax = plt.subplots(figsize=(12, 12)) top10_impression.plot(kind='bar',use_index=True,position=0.8, grid=True,fontsize=8,rot=6,) plt.ylabel('People\\'s Countries in which the Page was", "def plot_ages_impressions(dataframe): women_age = dataframe.iloc[:,1:8].mean() men_age=dataframe.iloc[:,8:15].mean() date_impression = dataframe.iloc[:,0:1] fig, ax = plt.subplots(figsize=(8,", "sub_impression.mean().nlargest(10) objects_content= top10_content.axes fig, ax = plt.subplots(figsize=(12, 12)) top10_content.plot(kind='bar',use_index=True,position=0.8, grid=True,fontsize=8,rot=6,) plt.ylabel('People that are", "import pandas as pd import matplotlib.pyplot as plt import matplotlib.dates as dates def", "title=\"Age Groups\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Women-Age-Content\"+\".png\",dpi=300) print(\"Women : Age-Content chart", "left\", bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Women-Age-Impression\"+\".png\",dpi=300) print(\"Women : Age-Impression chart was created!\") plt.clf()", "dataframe1.iloc[:,0:1] #print(date_content.iat[-1,0]) sub_content= dataframe1.iloc[:,1:] sub_impression = dataframe2.iloc[:,1:] top10_content = sub_content.mean().nlargest(10) top10_impression = sub_impression.mean().nlargest(10)", "was created!\") plt.clf() def plot_ages_impressions(dataframe): women_age = dataframe.iloc[:,1:8].mean() men_age=dataframe.iloc[:,8:15].mean() date_impression = dataframe.iloc[:,0:1] fig,", "Groups\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Women-Age-Impression\"+\".png\",dpi=300) print(\"Women : Age-Impression chart was", "func(pct, allvals): absolute = int(pct/100.*np.sum(allvals)) return \"{:.1f}%\\n({:d} )\".format(pct, absolute) labels_Ages=['13-17','18-24','25-34','35-44','45-54','55-64','65+'] def plot_ages_content(dataframe): #print(dataframe)", "Age Chart for Content 
Activity\"+date_content.iat[0,0].split(\"T\",1)[0]+\" until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage and (number of", "as plt import matplotlib.dates as dates def func(pct, allvals): absolute = int(pct/100.*np.sum(allvals)) return", "int(pct/100.*np.sum(allvals)) return \"{:.1f}%\\n({:d} )\".format(pct, absolute) labels_Ages=['13-17','18-24','25-34','35-44','45-54','55-64','65+'] def plot_ages_content(dataframe): #print(dataframe) # iloc[rows,cols] date_content=dataframe.iloc[:,0:1] women_age", "= plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect=\"equal\")) women_age.plot.pie(labels=['F.13-17','F.18-24','F.25-34','F.35-44','F.45-54','F.55-64','F.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,women_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Women Age Chart", "numpy as np import datetime as dt import matplotlib.pyplot as plt import pandas", "fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect=\"equal\")) women_age.plot.pie(labels=['F.13-17','F.18-24','F.25-34','F.35-44','F.45-54','F.55-64','F.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,women_age.values), textprops=dict(color=\"w\")) ax.set_title(\"Average Women", "pct:func(pct,women_age.values), textprops=dict(color=\"w\")) ax.set_title(\"Average Women Age Chart for Content Activity\"+date_content.iat[0,0].split(\"T\",1)[0]+\" until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average", "as np import datetime as dt import matplotlib.pyplot as plt import pandas as", "= plt.subplots(figsize=(12, 12)) top10_impression.plot(kind='bar',use_index=True,position=0.8, grid=True,fontsize=8,rot=6,) plt.ylabel('People\\'s Countries in which the Page was appeared", "fontsize=8,subplots=True, autopct=lambda pct:func(pct,women_age.values), textprops=dict(color=\"w\")) ax.set_title(\"Average Women Age Chart for Content 
Activity\"+date_content.iat[0,0].split(\"T\",1)[0]+\" until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15)", "Men Age Chart for Impressions\"+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage and (number of", "plt.clf() def plot_ages_impressions(dataframe): women_age = dataframe.iloc[:,1:8].mean() men_age=dataframe.iloc[:,8:15].mean() date_impression = dataframe.iloc[:,0:1] fig, ax =", "plt.clf() fig, ax = plt.subplots(figsize=(12, 12)) top10_impression.plot(kind='bar',use_index=True, grid=True,fontsize=8,rot=10,) plt.ylabel('People that Page was appeared", "plot_ages_impressions(dataframe): women_age = dataframe.iloc[:,1:8].mean() men_age=dataframe.iloc[:,8:15].mean() date_impression = dataframe.iloc[:,0:1] fig, ax = plt.subplots(figsize=(8, 6),", "0, 0.5, 1)) plt.savefig(\"charts/Women-Age-Content\"+\".png\",dpi=300) print(\"Women : Age-Content chart was created!\") plt.clf() fig, ax", "Groups\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Men-Age-Impression\"+\".png\",dpi=300) print(\"Men : Age-Impression chart was", "dataframe.iloc[:,1:] top10_impression = sub_impression.mean().nlargest(10) fig, ax = plt.subplots(figsize=(12, 12)) top10_impression.plot(kind='bar',use_index=True,position=0.8, grid=True,fontsize=8,rot=6,) plt.ylabel('People\\'s Countries", "bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Women-Age-Impression\"+\".png\",dpi=300) print(\"Women : Age-Impression chart was created!\") plt.clf() fig,", "on their screen',fontsize=15) plt.title('Average Impressions. Days :'+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) plt.savefig(\"charts/City-Impressions\"+\".png\",dpi=300) print(\"City-Impression chart was", "about the Page',fontsize=15) plt.title('Average content activity. 
from openpyxl import load_workbook
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.dates as dates


def func(pct, allvals):
    """Build an autopct label: slice percentage plus the absolute head-count.

    pct     -- slice percentage handed in by matplotlib's autopct hook
    allvals -- full value array of the pie, used to recover the absolute
               number of people behind the percentage
    """
    absolute = int(pct / 100. * np.sum(allvals))
    return "{:.1f}%\n({:d} )".format(pct, absolute)


# Age buckets shared by every demographic chart's legend below.
labels_Ages = ['13-17', '18-24', '25-34', '35-44', '45-54', '55-64', '65+']


def plot_ages_content(dataframe):
    """Render women's and men's age-distribution pies for content activity.

    Assumes column 0 holds ISO timestamps ("YYYY-MM-DDT...") and columns
    1-7 / 8-14 hold the women's / men's per-age-bucket values — TODO confirm
    against the Excel export schema.  Saves two PNGs under charts/.
    """
    # iloc[rows, cols]: column 0 carries the date range shown in the titles.
    date_content = dataframe.iloc[:, 0:1]
    women_age = dataframe.iloc[:, 1:8].mean()
    men_age = dataframe.iloc[:, 8:15].mean()

    fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect="equal"))
    women_age.plot.pie(
        labels=['F.13-17', 'F.18-24', 'F.25-34', 'F.35-44', 'F.45-54', 'F.55-64', 'F.65+'],
        fontsize=8, subplots=True,
        autopct=lambda pct: func(pct, women_age.values),
        textprops=dict(color="w"))
    # split("T", 1)[0] strips the time-of-day part of the ISO timestamp.
    ax.set_title("Average Women Age Chart for Content Activity" + date_content.iat[0, 0].split("T", 1)[0] + " until " + date_content.iat[-1, 0].split("T", 1)[0], fontsize=15)
    ax.set_ylabel('Chart')
    ax.set_xlabel('Average Percentage and (number of people)')
    ax.legend(labels=labels_Ages, title="Age Groups", loc="center left", bbox_to_anchor=(1, 0, 0.5, 1))
    plt.savefig("charts/Women-Age-Content" + ".png", dpi=300)
    print("Women : Age-Content chart was created!")
    plt.clf()
    plt.close(fig)  # fix: plt.clf() alone leaks the figure object

    fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect="equal"))
    men_age.plot.pie(
        labels=['M.13-17', 'M.18-24', 'M.25-34', 'M.35-44', 'M.45-54', 'M.55-64', 'M.65+'],
        fontsize=8, subplots=True,
        autopct=lambda pct: func(pct, men_age.values),
        textprops=dict(color="w"), pctdistance=0.7)
    ax.set_title("Average Men Age Chart for Content Activity" + date_content.iat[0, 0].split("T", 1)[0] + " until " + date_content.iat[-1, 0].split("T", 1)[0], fontsize=15)
    ax.set_ylabel('Chart')
    ax.set_xlabel('Average Percentage and (number of people)')
    ax.legend(labels=labels_Ages, title="Age Groups", loc="center left", bbox_to_anchor=(1, 0, 0.5, 1))
    plt.savefig("charts/Men-Age-Content" + ".png", dpi=300)
    print("Men : Age-Content chart was created!")
    plt.clf()
    plt.close(fig)


def plot_ages_impressions(dataframe):
    """Render women's and men's age-distribution pies for page impressions.

    Same column layout as plot_ages_content: column 0 = ISO timestamps,
    columns 1-7 = women, 8-14 = men.  Saves two PNGs under charts/.
    """
    women_age = dataframe.iloc[:, 1:8].mean()
    men_age = dataframe.iloc[:, 8:15].mean()
    date_impression = dataframe.iloc[:, 0:1]

    fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect="equal"))
    women_age.plot.pie(
        labels=['F.13-17', 'F.18-24', 'F.25-34', 'F.35-44', 'F.45-54', 'F.55-64', 'F.65+'],
        fontsize=8, subplots=True,
        autopct=lambda pct: func(pct, women_age.values),
        textprops=dict(color="w"), pctdistance=0.7)
    ax.set_title("Average Women Age Chart for Impression" + date_impression.iat[0, 0].split("T", 1)[0] + " until " + date_impression.iat[-1, 0].split("T", 1)[0], fontsize=15)
    ax.set_ylabel('Chart')
    ax.set_xlabel('Average Percentage and (number of people)')
    ax.legend(labels=labels_Ages, title="Age Groups", loc="center left", bbox_to_anchor=(1, 0, 0.5, 1))
    plt.savefig("charts/Women-Age-Impression" + ".png", dpi=300)
    print("Women : Age-Impression chart was created!")
    plt.clf()
    plt.close(fig)  # fix: release the figure, plt.clf() does not free it

    fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect="equal"))
    men_age.plot.pie(
        labels=['M.13-17', 'M.18-24', 'M.25-34', 'M.35-44', 'M.45-54', 'M.55-64', 'M.65+'],
        fontsize=8, subplots=True,
        autopct=lambda pct: func(pct, men_age.values),
        textprops=dict(color="w"), pctdistance=0.7)
    ax.set_title("Average Men Age Chart for Impressions" + date_impression.iat[0, 0].split("T", 1)[0] + " until " + date_impression.iat[-1, 0].split("T", 1)[0], fontsize=15)
    ax.set_ylabel('Chart')
    ax.set_xlabel('Average Percentage and (number of people)')
    ax.legend(labels=labels_Ages, title="Age Groups", loc="center left", bbox_to_anchor=(1, 0, 0.5, 1))
    plt.savefig("charts/Men-Age-Impression" + ".png", dpi=300)
    print("Men : Age-Impression chart was created!")
    plt.clf()
    plt.close(fig)


def plot_city(dataframe1, dataframe2):
    """Render top-10 city bar charts for content activity and impressions.

    dataframe1 -- per-city content-activity frame (column 0 = ISO dates)
    dataframe2 -- per-city impressions frame (column 0 = ISO dates)
    Saves charts/City-Content.png and charts/City-Impressions.png.
    """
    date_content = dataframe1.iloc[:, 0:1]
    # fix: originally read from dataframe1 (content) — the impressions
    # chart's date range must come from the impressions frame.
    date_impression = dataframe2.iloc[:, 0:1]
    sub_content = dataframe1.iloc[:, 1:]
    sub_impression = dataframe2.iloc[:, 1:]
    top10_content = sub_content.mean().nlargest(10)
    top10_impression = sub_impression.mean().nlargest(10)

    fig, ax = plt.subplots(figsize=(12, 12))
    top10_content.plot(kind='bar', use_index=True, position=0.8, grid=True, fontsize=8, rot=6,)
    plt.ylabel('People that are talking about the Page', fontsize=15)
    plt.title('Average content activity. Days: ' + date_content.iat[0, 0].split("T", 1)[0] + " until " + date_content.iat[-1, 0].split("T", 1)[0], fontsize=15)
    plt.savefig("charts/City-Content" + ".png", dpi=300)
    print("City-Content Activity chart was created!")
    plt.clf()
    plt.close(fig)  # fix: free the 12x12 figure instead of just clearing it

    fig, ax = plt.subplots(figsize=(12, 12))
    top10_impression.plot(kind='bar', use_index=True, grid=True, fontsize=8, rot=10,)
    plt.ylabel('People that Page was appeared on their screen', fontsize=15)
    plt.title('Average Impressions. Days :' + date_impression.iat[0, 0].split("T", 1)[0] + " until " + date_impression.iat[-1, 0].split("T", 1)[0], fontsize=15)
    plt.savefig("charts/City-Impressions" + ".png", dpi=300)
    print("City-Impression chart was created!")
    plt.clf()
    plt.close(fig)


def plot_country(dataframe):
    """Render the top-10 country bar chart for page impressions.

    Column 0 must hold ISO timestamps; the remaining columns hold per-country
    impression counts.  Saves charts/Country-Impression.png.
    """
    date_impression = dataframe.iloc[:, 0:1]
    sub_impression = dataframe.iloc[:, 1:]
    top10_impression = sub_impression.mean().nlargest(10)

    fig, ax = plt.subplots(figsize=(12, 12))
    top10_impression.plot(kind='bar', use_index=True, position=0.8, grid=True, fontsize=8, rot=6,)
    plt.ylabel('People\'s Countries in which the Page was appeared on their screens', fontsize=15)
    # NOTE(review): the title says 'content activity' but this chart plots
    # impressions (cf. the filename and the print below) — likely a
    # copy-paste slip; left unchanged to preserve the rendered output.
    plt.title('Average content activity. Days: ' + date_impression.iat[0, 0].split("T", 1)[0] + " until " + date_impression.iat[-1, 0].split("T", 1)[0], fontsize=15)
    plt.savefig("charts/Country-Impression" + ".png", dpi=300)
    print("Country-Impression chart was created!")
    plt.clf()
    plt.close(fig)


def run_charts():
    """Load every insights workbook from excels/ and render all charts."""
    xlsxfile_age_content = 'excels/Ages-Content.xlsx'
    age_content = pd.read_excel(xlsxfile_age_content)
    plot_ages_content(age_content)

    xlsxfile_age_impression = 'excels/Ages-Impressions.xlsx'
    age_impression = pd.read_excel(xlsxfile_age_impression)
    plot_ages_impressions(age_impression)

    xlsxfile_city_content = 'excels/City-Content.xlsx'
    city_content = pd.read_excel(xlsxfile_city_content)
    xlsxfile_city_impression = 'excels/City-Impression.xlsx'
    city_impression = pd.read_excel(xlsxfile_city_impression)
    plot_city(city_content, city_impression)

    xlsxfile_country_impression = 'excels/Country-Impression.xlsx'
    country_impression = pd.read_excel(xlsxfile_country_impression)
    plot_country(country_impression)


if __name__ == "__main__":
    # fix: guard the entry point so importing this module no longer
    # triggers the full chart-generation run as a side effect.
    run_charts()
Days :'+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) plt.savefig(\"charts/City-Impressions\"+\".png\",dpi=300) print(\"City-Impression chart was created!\") plt.clf() #print(top10_content.values)", "ax = plt.subplots(figsize=(12, 12)) top10_impression.plot(kind='bar',use_index=True, grid=True,fontsize=8,rot=10,) plt.ylabel('People that Page was appeared on their", "= pd.read_excel(xlsxfile_city_content) xlsxfile_city_impression = 'excels/City-Impression.xlsx' city_impression = pd.read_excel(xlsxfile_city_impression) plot_city(city_content,city_impression) xlsxfile_country_impression = 'excels/Country-Impression.xlsx' country_impression=", "was created!\") plt.clf() def run_charts(): xlsxfile_age_content ='excels/Ages-Content.xlsx' age_content = pd.read_excel(xlsxfile_age_content) plot_ages_content(age_content) xlsxfile_age_impression =", "6), subplot_kw=dict(aspect=\"equal\")) women_age.plot.pie(labels=['F.13-17','F.18-24','F.25-34','F.35-44','F.45-54','F.55-64','F.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,women_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Women Age Chart for Impression\"+date_impression.iat[0,0].split(\"T\",1)[0]+\"", "sub_impression = dataframe2.iloc[:,1:] top10_content = sub_content.mean().nlargest(10) top10_impression = sub_impression.mean().nlargest(10) objects_content= top10_content.axes fig, ax", "the Page',fontsize=15) plt.title('Average content activity. Days: '+date_content.iat[0,0].split(\"T\",1)[0]+\" until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15) plt.savefig(\"charts/City-Content\"+\".png\",dpi=300) print(\"City-Content Activity chart", "the Page was appeared on their screens',fontsize=15) plt.title('Average content activity. 
Days: '+date_impression.iat[0,0].split(\"T\",1)[0]+\" until", ")\".format(pct, absolute) labels_Ages=['13-17','18-24','25-34','35-44','45-54','55-64','65+'] def plot_ages_content(dataframe): #print(dataframe) # iloc[rows,cols] date_content=dataframe.iloc[:,0:1] women_age = dataframe.iloc[:,1:8].mean() men_age=dataframe.iloc[:,8:15].mean()", "plt.savefig(\"charts/Country-Impression\"+\".png\",dpi=300) print(\"Country-Impression chart was created!\") plt.clf() def run_charts(): xlsxfile_age_content ='excels/Ages-Content.xlsx' age_content = pd.read_excel(xlsxfile_age_content)", "plt.ylabel('People\\'s Countries in which the Page was appeared on their screens',fontsize=15) plt.title('Average content", "women_age.plot.pie(labels=['F.13-17','F.18-24','F.25-34','F.35-44','F.45-54','F.55-64','F.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,women_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Women Age Chart for Impression\"+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15)", "xlsxfile_city_impression = 'excels/City-Impression.xlsx' city_impression = pd.read_excel(xlsxfile_city_impression) plot_city(city_content,city_impression) xlsxfile_country_impression = 'excels/Country-Impression.xlsx' country_impression= pd.read_excel(xlsxfile_country_impression) plot_country(country_impression)", "np import datetime as dt import matplotlib.pyplot as plt import pandas as pd", "title=\"Age Groups\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Men-Age-Content\"+\".png\",dpi=300) print(\"Men : Age-Content chart", "ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect=\"equal\")) men_age.plot.pie(labels=['M.13-17','M.18-24','M.25-34','M.35-44','M.45-54','M.55-64','M.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,men_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Men Age", 
"\"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage and (number of people)') ax.legend(labels=labels_Ages, title=\"Age Groups\", loc=\"center left\",", "top10_impression = sub_impression.mean().nlargest(10) fig, ax = plt.subplots(figsize=(12, 12)) top10_impression.plot(kind='bar',use_index=True,position=0.8, grid=True,fontsize=8,rot=6,) plt.ylabel('People\\'s Countries in", "matplotlib.pyplot as plt import matplotlib.dates as dates def func(pct, allvals): absolute = int(pct/100.*np.sum(allvals))", "textprops=dict(color=\"w\")) ax.set_title(\"Average Women Age Chart for Content Activity\"+date_content.iat[0,0].split(\"T\",1)[0]+\" until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage", "pd.read_excel(xlsxfile_city_content) xlsxfile_city_impression = 'excels/City-Impression.xlsx' city_impression = pd.read_excel(xlsxfile_city_impression) plot_city(city_content,city_impression) xlsxfile_country_impression = 'excels/Country-Impression.xlsx' country_impression= pd.read_excel(xlsxfile_country_impression)", "Chart for Content Activity\"+date_content.iat[0,0].split(\"T\",1)[0]+\" until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage and (number of people)')", "subplot_kw=dict(aspect=\"equal\")) women_age.plot.pie(labels=['F.13-17','F.18-24','F.25-34','F.35-44','F.45-54','F.55-64','F.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,women_age.values), textprops=dict(color=\"w\")) ax.set_title(\"Average Women Age Chart for Content Activity\"+date_content.iat[0,0].split(\"T\",1)[0]+\"", "age_content = pd.read_excel(xlsxfile_age_content) plot_ages_content(age_content) xlsxfile_age_impression = 'excels/Ages-Impressions.xlsx' age_impression = pd.read_excel(xlsxfile_age_impression) plot_ages_impressions(age_impression) xlsxfile_city_content =", 
"date_content=dataframe.iloc[:,0:1] women_age = dataframe.iloc[:,1:8].mean() men_age=dataframe.iloc[:,8:15].mean() fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect=\"equal\")) women_age.plot.pie(labels=['F.13-17','F.18-24','F.25-34','F.35-44','F.45-54','F.55-64','F.65+'], fontsize=8,subplots=True,", "= dataframe2.iloc[:,1:] top10_content = sub_content.mean().nlargest(10) top10_impression = sub_impression.mean().nlargest(10) objects_content= top10_content.axes fig, ax =", "grid=True,fontsize=8,rot=10,) plt.ylabel('People that Page was appeared on their screen',fontsize=15) plt.title('Average Impressions. Days :'+date_impression.iat[0,0].split(\"T\",1)[0]+\"", "\"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage and (number of people)') ax.legend(labels=labels_Ages, title=\"Age Groups\", loc=\"center left\",", "#print(dataframe1) #print(dataframe2) date_content=dataframe1.iloc[:,0:1] date_impression = dataframe1.iloc[:,0:1] #print(date_content.iat[-1,0]) sub_content= dataframe1.iloc[:,1:] sub_impression = dataframe2.iloc[:,1:] top10_content", "0.5, 1)) plt.savefig(\"charts/Women-Age-Impression\"+\".png\",dpi=300) print(\"Women : Age-Impression chart was created!\") plt.clf() fig, ax =", "in which the Page was appeared on their screens',fontsize=15) plt.title('Average content activity. 
Days:", "loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Women-Age-Content\"+\".png\",dpi=300) print(\"Women : Age-Content chart was created!\")", "of people)') ax.legend(labels=labels_Ages, title=\"Age Groups\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Men-Age-Content\"+\".png\",dpi=300) print(\"Men", "people)') ax.legend(labels=labels_Ages, title=\"Age Groups\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Women-Age-Impression\"+\".png\",dpi=300) print(\"Women :", "6), subplot_kw=dict(aspect=\"equal\")) men_age.plot.pie(labels=['M.13-17','M.18-24','M.25-34','M.35-44','M.45-54','M.55-64','M.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,men_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Men Age Chart for Impressions\"+date_impression.iat[0,0].split(\"T\",1)[0]+\"", "plt.savefig(\"charts/Men-Age-Content\"+\".png\",dpi=300) print(\"Men : Age-Content chart was created!\") plt.clf() def plot_ages_impressions(dataframe): women_age = dataframe.iloc[:,1:8].mean()", "subplot_kw=dict(aspect=\"equal\")) men_age.plot.pie(labels=['M.13-17','M.18-24','M.25-34','M.35-44','M.45-54','M.55-64','M.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,men_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Men Age Chart for Impressions\"+date_impression.iat[0,0].split(\"T\",1)[0]+\" until", "1)) plt.savefig(\"charts/Women-Age-Impression\"+\".png\",dpi=300) print(\"Women : Age-Impression chart was created!\") plt.clf() fig, ax = plt.subplots(figsize=(8,", "print(\"Men : Age-Content chart was created!\") plt.clf() def plot_ages_impressions(dataframe): women_age = dataframe.iloc[:,1:8].mean() men_age=dataframe.iloc[:,8:15].mean()", "0, 0.5, 1)) plt.savefig(\"charts/Men-Age-Impression\"+\".png\",dpi=300) print(\"Men : Age-Impression chart was created!\") plt.clf() def plot_city(dataframe1,dataframe2):", "Days: 
'+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) plt.savefig(\"charts/Country-Impression\"+\".png\",dpi=300) print(\"Country-Impression chart was created!\") plt.clf() def run_charts(): xlsxfile_age_content", "pct:func(pct,men_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Men Age Chart for Content Activity\"+date_content.iat[0,0].split(\"T\",1)[0]+\" until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average", "created!\") plt.clf() #print(top10_content.values) def plot_country(dataframe): date_impression = dataframe.iloc[:,0:1] sub_impression = dataframe.iloc[:,1:] top10_impression =", "chart was created!\") plt.clf() def plot_ages_impressions(dataframe): women_age = dataframe.iloc[:,1:8].mean() men_age=dataframe.iloc[:,8:15].mean() date_impression = dataframe.iloc[:,0:1]", "matplotlib.dates as dates def func(pct, allvals): absolute = int(pct/100.*np.sum(allvals)) return \"{:.1f}%\\n({:d} )\".format(pct, absolute)", "Percentage and (number of people)') ax.legend(labels=labels_Ages, title=\"Age Groups\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5,", "fig, ax = plt.subplots(figsize=(12, 12)) top10_impression.plot(kind='bar',use_index=True, grid=True,fontsize=8,rot=10,) plt.ylabel('People that Page was appeared on", "plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect=\"equal\")) women_age.plot.pie(labels=['F.13-17','F.18-24','F.25-34','F.35-44','F.45-54','F.55-64','F.65+'], fontsize=8,subplots=True, autopct=lambda pct:func(pct,women_age.values), textprops=dict(color=\"w\")) ax.set_title(\"Average Women Age Chart for", "Impressions. 
Days :'+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) plt.savefig(\"charts/City-Impressions\"+\".png\",dpi=300) print(\"City-Impression chart was created!\") plt.clf() #print(top10_content.values) def", "datetime as dt import matplotlib.pyplot as plt import pandas as pd import matplotlib.pyplot", "import numpy as np import datetime as dt import matplotlib.pyplot as plt import", "import matplotlib.pyplot as plt import matplotlib.dates as dates def func(pct, allvals): absolute =", "Groups\", loc=\"center left\", bbox_to_anchor=(1, 0, 0.5, 1)) plt.savefig(\"charts/Men-Age-Content\"+\".png\",dpi=300) print(\"Men : Age-Content chart was", "Age Chart for Impressions\"+date_impression.iat[0,0].split(\"T\",1)[0]+\" until \"+date_impression.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage and (number of people)')", "date_impression = dataframe.iloc[:,0:1] sub_impression = dataframe.iloc[:,1:] top10_impression = sub_impression.mean().nlargest(10) fig, ax = plt.subplots(figsize=(12,", "autopct=lambda pct:func(pct,men_age.values), textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Men Age Chart for Content Activity\"+date_content.iat[0,0].split(\"T\",1)[0]+\" until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart')", "was created!\") plt.clf() fig, ax = plt.subplots(figsize=(12, 12)) top10_impression.plot(kind='bar',use_index=True, grid=True,fontsize=8,rot=10,) plt.ylabel('People that Page", "textprops=dict(color=\"w\"),pctdistance=0.7) ax.set_title(\"Average Men Age Chart for Content Activity\"+date_content.iat[0,0].split(\"T\",1)[0]+\" until \"+date_content.iat[-1,0].split(\"T\",1)[0],fontsize=15) ax.set_ylabel('Chart') ax.set_xlabel('Average Percentage", "'excels/City-Content.xlsx' city_content = pd.read_excel(xlsxfile_city_content) xlsxfile_city_impression = 'excels/City-Impression.xlsx' city_impression = 
pd.read_excel(xlsxfile_city_impression) plot_city(city_content,city_impression) xlsxfile_country_impression =", "chart was created!\") plt.clf() fig, ax = plt.subplots(figsize=(8, 6), subplot_kw=dict(aspect=\"equal\")) men_age.plot.pie(labels=['M.13-17','M.18-24','M.25-34','M.35-44','M.45-54','M.55-64','M.65+'], fontsize=8,subplots=True, autopct=lambda", "created!\") plt.clf() def plot_ages_impressions(dataframe): women_age = dataframe.iloc[:,1:8].mean() men_age=dataframe.iloc[:,8:15].mean() date_impression = dataframe.iloc[:,0:1] fig, ax" ]
[ "merging.\"\"\" from .cache import parse_cache_option from .core import Style __all__ = (\"Style\", \"parse_cache_option\")", "<gh_stars>0 \"\"\"Styles parsing and merging.\"\"\" from .cache import parse_cache_option from .core import Style", "\"\"\"Styles parsing and merging.\"\"\" from .cache import parse_cache_option from .core import Style __all__", "and merging.\"\"\" from .cache import parse_cache_option from .core import Style __all__ = (\"Style\",", "parsing and merging.\"\"\" from .cache import parse_cache_option from .core import Style __all__ =" ]
[ "self.test_obj.divide_data_into_subsets(num_subsets=4) exp = [(0, 1000, '0'), (1000, 2000, '1'), (2000, 3000, '2'), (3000,", "self.assertTrue(len(result) == 4) self.assertTrue(result == exp) def test_run_experiment(self): subsets = [(1, 2, '4'),", "= [mock.call('v', 't'), mock.call('v', 't')] self.test_obj.runner_obj.run_independent.assert_called_with() self.assertTrue(self.test_obj.change_config_rel_op.call_args_list == exp_ccro) self.assertTrue(self.test_obj.runner_obj.run_relational.call_args_list == exp_rel) self.test_obj.runner_obj.run_evaluation.assert_called_with('t')", "'69') self.assertTrue(self.test_obj.config_obj.start == 2) self.assertTrue(self.test_obj.config_obj.end == 4) self.assertTrue(self.test_obj.config_obj.fold == '69') def test_change_config_rel_op(self): self.test_obj.change_config_rel_op(train=False)", "= [(1, 2, '4'), (7, 77, '88'), (7, 88, '169')] self.test_obj.single_run = mock.Mock()", "mock.Mock(runner.Runner) self.test_obj = subsets_exp.Subsets_Experiment(config_obj, mock_runner_obj) def tearDown(self): self.test_obj = None def test_init(self): #", "self.assertTrue(test_obj.config_obj.pseudo) def test_divide_data_into_subsets(self): self.test_obj.config_obj.end = 4000 self.test_obj.config_obj.start = 0 self.test_obj.config_obj.fold = '0' result", "def tearDown(self): self.test_obj = None def test_init(self): # setup test_obj = self.test_obj #", "tu class Subsets_ExperimentTestCase(unittest.TestCase): def setUp(self): config_obj = tu.sample_config() mock_runner_obj = mock.Mock(runner.Runner) self.test_obj =", "mock.Mock() self.test_obj.change_config_parameters = mock.Mock() self.test_obj.run_experiment(subsets) exp_ccp = [mock.call(1, 2, '4'), mock.call(7, 77, '88'),", "== 4) self.assertTrue(self.test_obj.config_obj.fold == '69') def test_change_config_rel_op(self): self.test_obj.change_config_rel_op(train=False) self.assertTrue(self.test_obj.config_obj.infer) def test_suite(): suite =", "3000, '2'), (3000, 4000, '3')] 
self.assertTrue(len(result) == 4) self.assertTrue(result == exp) def test_run_experiment(self):", "== exp_rel) self.test_obj.runner_obj.run_evaluation.assert_called_with('t') def test_change_config_parameters(self): self.test_obj.change_config_parameters(2, 4, '69') self.assertTrue(self.test_obj.config_obj.start == 2) self.assertTrue(self.test_obj.config_obj.end ==", "subsets_exp.Subsets_Experiment(config_obj, mock_runner_obj) def tearDown(self): self.test_obj = None def test_init(self): # setup test_obj =", "from .context import runner from .context import test_utils as tu class Subsets_ExperimentTestCase(unittest.TestCase): def", "def test_change_config_parameters(self): self.test_obj.change_config_parameters(2, 4, '69') self.assertTrue(self.test_obj.config_obj.start == 2) self.assertTrue(self.test_obj.config_obj.end == 4) self.assertTrue(self.test_obj.config_obj.fold ==", "self.test_obj.single_run = mock.Mock() self.test_obj.change_config_parameters = mock.Mock() self.test_obj.run_experiment(subsets) exp_ccp = [mock.call(1, 2, '4'), mock.call(7,", "0 self.test_obj.config_obj.fold = '0' result = self.test_obj.divide_data_into_subsets(num_subsets=4) exp = [(0, 1000, '0'), (1000,", "def test_init(self): # setup test_obj = self.test_obj # assert self.assertTrue(isinstance(test_obj.config_obj, config.Config)) self.assertTrue(isinstance(test_obj.runner_obj, runner.Runner))", "self.assertTrue(self.test_obj.change_config_parameters.call_args_list == exp_ccp) def test_single_run(self): self.test_obj.runner_obj.run_independent = mock.Mock() self.test_obj.runner_obj.run_independent.return_value = ('v', 't') self.test_obj.change_config_rel_op", "mock.call(train=False)] exp_rel = [mock.call('v', 't'), mock.call('v', 't')] self.test_obj.runner_obj.run_independent.assert_called_with() self.assertTrue(self.test_obj.change_config_rel_op.call_args_list == exp_ccro) self.assertTrue(self.test_obj.runner_obj.run_relational.call_args_list ==", "import subsets_exp from .context import 
config from .context import runner from .context import", "self.assertTrue(self.test_obj.config_obj.infer) def test_suite(): suite = unittest.TestLoader().loadTestsFromTestCase( Subsets_ExperimentTestCase) return suite if __name__ == '__main__':", "import mock import unittest from .context import subsets_exp from .context import config from", "def test_suite(): suite = unittest.TestLoader().loadTestsFromTestCase( Subsets_ExperimentTestCase) return suite if __name__ == '__main__': unittest.main()", "4000, '3')] self.assertTrue(len(result) == 4) self.assertTrue(result == exp) def test_run_experiment(self): subsets = [(1,", "2, '4'), mock.call(7, 77, '88'), mock.call(7, 88, '169')] self.assertTrue(self.test_obj.single_run.call_count == 3) self.assertTrue(self.test_obj.change_config_parameters.call_args_list ==", "test_single_run(self): self.test_obj.runner_obj.run_independent = mock.Mock() self.test_obj.runner_obj.run_independent.return_value = ('v', 't') self.test_obj.change_config_rel_op = mock.Mock() self.test_obj.runner_obj.run_relational =", "[(1, 2, '4'), (7, 77, '88'), (7, 88, '169')] self.test_obj.single_run = mock.Mock() self.test_obj.change_config_parameters", "self.assertTrue(self.test_obj.single_run.call_count == 3) self.assertTrue(self.test_obj.change_config_parameters.call_args_list == exp_ccp) def test_single_run(self): self.test_obj.runner_obj.run_independent = mock.Mock() self.test_obj.runner_obj.run_independent.return_value =", "= mock.Mock() self.test_obj.change_config_parameters = mock.Mock() self.test_obj.run_experiment(subsets) exp_ccp = [mock.call(1, 2, '4'), mock.call(7, 77,", "def test_run_experiment(self): subsets = [(1, 2, '4'), (7, 77, '88'), (7, 88, '169')]", "self.assertTrue(self.test_obj.config_obj.fold == '69') def test_change_config_rel_op(self): self.test_obj.change_config_rel_op(train=False) self.assertTrue(self.test_obj.config_obj.infer) def test_suite(): suite = unittest.TestLoader().loadTestsFromTestCase( 
Subsets_ExperimentTestCase)", "import test_utils as tu class Subsets_ExperimentTestCase(unittest.TestCase): def setUp(self): config_obj = tu.sample_config() mock_runner_obj =", "= self.test_obj # assert self.assertTrue(isinstance(test_obj.config_obj, config.Config)) self.assertTrue(isinstance(test_obj.runner_obj, runner.Runner)) self.assertTrue(test_obj.config_obj.modified) self.assertTrue(test_obj.config_obj.pseudo) def test_divide_data_into_subsets(self): self.test_obj.config_obj.end", "exp_rel) self.test_obj.runner_obj.run_evaluation.assert_called_with('t') def test_change_config_parameters(self): self.test_obj.change_config_parameters(2, 4, '69') self.assertTrue(self.test_obj.config_obj.start == 2) self.assertTrue(self.test_obj.config_obj.end == 4)", "tearDown(self): self.test_obj = None def test_init(self): # setup test_obj = self.test_obj # assert", "(7, 77, '88'), (7, 88, '169')] self.test_obj.single_run = mock.Mock() self.test_obj.change_config_parameters = mock.Mock() self.test_obj.run_experiment(subsets)", "'169')] self.assertTrue(self.test_obj.single_run.call_count == 3) self.assertTrue(self.test_obj.change_config_parameters.call_args_list == exp_ccp) def test_single_run(self): self.test_obj.runner_obj.run_independent = mock.Mock() self.test_obj.runner_obj.run_independent.return_value", "test_change_config_parameters(self): self.test_obj.change_config_parameters(2, 4, '69') self.assertTrue(self.test_obj.config_obj.start == 2) self.assertTrue(self.test_obj.config_obj.end == 4) self.assertTrue(self.test_obj.config_obj.fold == '69')", "self.test_obj.runner_obj.run_independent.return_value = ('v', 't') self.test_obj.change_config_rel_op = mock.Mock() self.test_obj.runner_obj.run_relational = mock.Mock() self.test_obj.runner_obj.run_evaluation = mock.Mock()", "[(0, 1000, '0'), (1000, 2000, '1'), (2000, 3000, '2'), (3000, 4000, '3')] self.assertTrue(len(result)", "self.test_obj.config_obj.end = 4000 self.test_obj.config_obj.start = 0 
self.test_obj.config_obj.fold = '0' result = self.test_obj.divide_data_into_subsets(num_subsets=4) exp", "config.Config)) self.assertTrue(isinstance(test_obj.runner_obj, runner.Runner)) self.assertTrue(test_obj.config_obj.modified) self.assertTrue(test_obj.config_obj.pseudo) def test_divide_data_into_subsets(self): self.test_obj.config_obj.end = 4000 self.test_obj.config_obj.start = 0", "= mock.Mock() self.test_obj.runner_obj.run_independent.return_value = ('v', 't') self.test_obj.change_config_rel_op = mock.Mock() self.test_obj.runner_obj.run_relational = mock.Mock() self.test_obj.runner_obj.run_evaluation", "[mock.call(1, 2, '4'), mock.call(7, 77, '88'), mock.call(7, 88, '169')] self.assertTrue(self.test_obj.single_run.call_count == 3) self.assertTrue(self.test_obj.change_config_parameters.call_args_list", "the subsets_exp module. \"\"\" import mock import unittest from .context import subsets_exp from", "mock.Mock() self.test_obj.runner_obj.run_evaluation = mock.Mock() self.test_obj.single_run() exp_ccro = [mock.call(train=True), mock.call(train=False)] exp_rel = [mock.call('v', 't'),", "Tests the subsets_exp module. \"\"\" import mock import unittest from .context import subsets_exp", "4000 self.test_obj.config_obj.start = 0 self.test_obj.config_obj.fold = '0' result = self.test_obj.divide_data_into_subsets(num_subsets=4) exp = [(0,", "'4'), mock.call(7, 77, '88'), mock.call(7, 88, '169')] self.assertTrue(self.test_obj.single_run.call_count == 3) self.assertTrue(self.test_obj.change_config_parameters.call_args_list == exp_ccp)", "= None def test_init(self): # setup test_obj = self.test_obj # assert self.assertTrue(isinstance(test_obj.config_obj, config.Config))", "None def test_init(self): # setup test_obj = self.test_obj # assert self.assertTrue(isinstance(test_obj.config_obj, config.Config)) self.assertTrue(isinstance(test_obj.runner_obj,", "subsets_exp module. 
\"\"\" import mock import unittest from .context import subsets_exp from .context", "self.test_obj.change_config_rel_op(train=False) self.assertTrue(self.test_obj.config_obj.infer) def test_suite(): suite = unittest.TestLoader().loadTestsFromTestCase( Subsets_ExperimentTestCase) return suite if __name__ ==", "= self.test_obj.divide_data_into_subsets(num_subsets=4) exp = [(0, 1000, '0'), (1000, 2000, '1'), (2000, 3000, '2'),", "self.test_obj # assert self.assertTrue(isinstance(test_obj.config_obj, config.Config)) self.assertTrue(isinstance(test_obj.runner_obj, runner.Runner)) self.assertTrue(test_obj.config_obj.modified) self.assertTrue(test_obj.config_obj.pseudo) def test_divide_data_into_subsets(self): self.test_obj.config_obj.end =", "= [mock.call(train=True), mock.call(train=False)] exp_rel = [mock.call('v', 't'), mock.call('v', 't')] self.test_obj.runner_obj.run_independent.assert_called_with() self.assertTrue(self.test_obj.change_config_rel_op.call_args_list == exp_ccro)", "self.test_obj.config_obj.start = 0 self.test_obj.config_obj.fold = '0' result = self.test_obj.divide_data_into_subsets(num_subsets=4) exp = [(0, 1000,", "\"\"\" import mock import unittest from .context import subsets_exp from .context import config", "def test_divide_data_into_subsets(self): self.test_obj.config_obj.end = 4000 self.test_obj.config_obj.start = 0 self.test_obj.config_obj.fold = '0' result =", "= mock.Mock(runner.Runner) self.test_obj = subsets_exp.Subsets_Experiment(config_obj, mock_runner_obj) def tearDown(self): self.test_obj = None def test_init(self):", "exp) def test_run_experiment(self): subsets = [(1, 2, '4'), (7, 77, '88'), (7, 88,", "== exp_ccro) self.assertTrue(self.test_obj.runner_obj.run_relational.call_args_list == exp_rel) self.test_obj.runner_obj.run_evaluation.assert_called_with('t') def test_change_config_parameters(self): self.test_obj.change_config_parameters(2, 4, '69') self.assertTrue(self.test_obj.config_obj.start ==", "self.test_obj.config_obj.fold = 
'0' result = self.test_obj.divide_data_into_subsets(num_subsets=4) exp = [(0, 1000, '0'), (1000, 2000,", "[mock.call(train=True), mock.call(train=False)] exp_rel = [mock.call('v', 't'), mock.call('v', 't')] self.test_obj.runner_obj.run_independent.assert_called_with() self.assertTrue(self.test_obj.change_config_rel_op.call_args_list == exp_ccro) self.assertTrue(self.test_obj.runner_obj.run_relational.call_args_list", "test_utils as tu class Subsets_ExperimentTestCase(unittest.TestCase): def setUp(self): config_obj = tu.sample_config() mock_runner_obj = mock.Mock(runner.Runner)", "'88'), (7, 88, '169')] self.test_obj.single_run = mock.Mock() self.test_obj.change_config_parameters = mock.Mock() self.test_obj.run_experiment(subsets) exp_ccp =", ".context import subsets_exp from .context import config from .context import runner from .context", "test_divide_data_into_subsets(self): self.test_obj.config_obj.end = 4000 self.test_obj.config_obj.start = 0 self.test_obj.config_obj.fold = '0' result = self.test_obj.divide_data_into_subsets(num_subsets=4)", "4) self.assertTrue(result == exp) def test_run_experiment(self): subsets = [(1, 2, '4'), (7, 77,", "self.test_obj.run_experiment(subsets) exp_ccp = [mock.call(1, 2, '4'), mock.call(7, 77, '88'), mock.call(7, 88, '169')] self.assertTrue(self.test_obj.single_run.call_count", "setUp(self): config_obj = tu.sample_config() mock_runner_obj = mock.Mock(runner.Runner) self.test_obj = subsets_exp.Subsets_Experiment(config_obj, mock_runner_obj) def tearDown(self):", "config_obj = tu.sample_config() mock_runner_obj = mock.Mock(runner.Runner) self.test_obj = subsets_exp.Subsets_Experiment(config_obj, mock_runner_obj) def tearDown(self): self.test_obj", "mock.call(7, 77, '88'), mock.call(7, 88, '169')] self.assertTrue(self.test_obj.single_run.call_count == 3) self.assertTrue(self.test_obj.change_config_parameters.call_args_list == exp_ccp) def", "# assert self.assertTrue(isinstance(test_obj.config_obj, config.Config)) 
self.assertTrue(isinstance(test_obj.runner_obj, runner.Runner)) self.assertTrue(test_obj.config_obj.modified) self.assertTrue(test_obj.config_obj.pseudo) def test_divide_data_into_subsets(self): self.test_obj.config_obj.end = 4000", "== exp_ccp) def test_single_run(self): self.test_obj.runner_obj.run_independent = mock.Mock() self.test_obj.runner_obj.run_independent.return_value = ('v', 't') self.test_obj.change_config_rel_op =", "def setUp(self): config_obj = tu.sample_config() mock_runner_obj = mock.Mock(runner.Runner) self.test_obj = subsets_exp.Subsets_Experiment(config_obj, mock_runner_obj) def", "# setup test_obj = self.test_obj # assert self.assertTrue(isinstance(test_obj.config_obj, config.Config)) self.assertTrue(isinstance(test_obj.runner_obj, runner.Runner)) self.assertTrue(test_obj.config_obj.modified) self.assertTrue(test_obj.config_obj.pseudo)", "exp_ccro = [mock.call(train=True), mock.call(train=False)] exp_rel = [mock.call('v', 't'), mock.call('v', 't')] self.test_obj.runner_obj.run_independent.assert_called_with() self.assertTrue(self.test_obj.change_config_rel_op.call_args_list ==", ".context import config from .context import runner from .context import test_utils as tu", "= mock.Mock() self.test_obj.single_run() exp_ccro = [mock.call(train=True), mock.call(train=False)] exp_rel = [mock.call('v', 't'), mock.call('v', 't')]", "import runner from .context import test_utils as tu class Subsets_ExperimentTestCase(unittest.TestCase): def setUp(self): config_obj", "self.test_obj.runner_obj.run_evaluation = mock.Mock() self.test_obj.single_run() exp_ccro = [mock.call(train=True), mock.call(train=False)] exp_rel = [mock.call('v', 't'), mock.call('v',", "88, '169')] self.assertTrue(self.test_obj.single_run.call_count == 3) self.assertTrue(self.test_obj.change_config_parameters.call_args_list == exp_ccp) def test_single_run(self): self.test_obj.runner_obj.run_independent = mock.Mock()", "= tu.sample_config() mock_runner_obj = mock.Mock(runner.Runner) 
self.test_obj = subsets_exp.Subsets_Experiment(config_obj, mock_runner_obj) def tearDown(self): self.test_obj =", "from .context import config from .context import runner from .context import test_utils as", "'t'), mock.call('v', 't')] self.test_obj.runner_obj.run_independent.assert_called_with() self.assertTrue(self.test_obj.change_config_rel_op.call_args_list == exp_ccro) self.assertTrue(self.test_obj.runner_obj.run_relational.call_args_list == exp_rel) self.test_obj.runner_obj.run_evaluation.assert_called_with('t') def test_change_config_parameters(self):", "= [mock.call(1, 2, '4'), mock.call(7, 77, '88'), mock.call(7, 88, '169')] self.assertTrue(self.test_obj.single_run.call_count == 3)", "1000, '0'), (1000, 2000, '1'), (2000, 3000, '2'), (3000, 4000, '3')] self.assertTrue(len(result) ==", "mock_runner_obj) def tearDown(self): self.test_obj = None def test_init(self): # setup test_obj = self.test_obj", "== 3) self.assertTrue(self.test_obj.change_config_parameters.call_args_list == exp_ccp) def test_single_run(self): self.test_obj.runner_obj.run_independent = mock.Mock() self.test_obj.runner_obj.run_independent.return_value = ('v',", "from .context import subsets_exp from .context import config from .context import runner from", "exp_ccp = [mock.call(1, 2, '4'), mock.call(7, 77, '88'), mock.call(7, 88, '169')] self.assertTrue(self.test_obj.single_run.call_count ==", "= mock.Mock() self.test_obj.runner_obj.run_evaluation = mock.Mock() self.test_obj.single_run() exp_ccro = [mock.call(train=True), mock.call(train=False)] exp_rel = [mock.call('v',", "mock.call('v', 't')] self.test_obj.runner_obj.run_independent.assert_called_with() self.assertTrue(self.test_obj.change_config_rel_op.call_args_list == exp_ccro) self.assertTrue(self.test_obj.runner_obj.run_relational.call_args_list == exp_rel) self.test_obj.runner_obj.run_evaluation.assert_called_with('t') def test_change_config_parameters(self): self.test_obj.change_config_parameters(2,", 
"Subsets_ExperimentTestCase(unittest.TestCase): def setUp(self): config_obj = tu.sample_config() mock_runner_obj = mock.Mock(runner.Runner) self.test_obj = subsets_exp.Subsets_Experiment(config_obj, mock_runner_obj)", "2000, '1'), (2000, 3000, '2'), (3000, 4000, '3')] self.assertTrue(len(result) == 4) self.assertTrue(result ==", "assert self.assertTrue(isinstance(test_obj.config_obj, config.Config)) self.assertTrue(isinstance(test_obj.runner_obj, runner.Runner)) self.assertTrue(test_obj.config_obj.modified) self.assertTrue(test_obj.config_obj.pseudo) def test_divide_data_into_subsets(self): self.test_obj.config_obj.end = 4000 self.test_obj.config_obj.start", ".context import test_utils as tu class Subsets_ExperimentTestCase(unittest.TestCase): def setUp(self): config_obj = tu.sample_config() mock_runner_obj", "'0'), (1000, 2000, '1'), (2000, 3000, '2'), (3000, 4000, '3')] self.assertTrue(len(result) == 4)", "(7, 88, '169')] self.test_obj.single_run = mock.Mock() self.test_obj.change_config_parameters = mock.Mock() self.test_obj.run_experiment(subsets) exp_ccp = [mock.call(1,", "77, '88'), (7, 88, '169')] self.test_obj.single_run = mock.Mock() self.test_obj.change_config_parameters = mock.Mock() self.test_obj.run_experiment(subsets) exp_ccp", "== exp) def test_run_experiment(self): subsets = [(1, 2, '4'), (7, 77, '88'), (7,", "import config from .context import runner from .context import test_utils as tu class", "def test_single_run(self): self.test_obj.runner_obj.run_independent = mock.Mock() self.test_obj.runner_obj.run_independent.return_value = ('v', 't') self.test_obj.change_config_rel_op = mock.Mock() self.test_obj.runner_obj.run_relational", "subsets_exp from .context import config from .context import runner from .context import test_utils", "setup test_obj = self.test_obj # assert self.assertTrue(isinstance(test_obj.config_obj, config.Config)) self.assertTrue(isinstance(test_obj.runner_obj, runner.Runner)) self.assertTrue(test_obj.config_obj.modified) 
self.assertTrue(test_obj.config_obj.pseudo) def", "self.test_obj.change_config_parameters = mock.Mock() self.test_obj.run_experiment(subsets) exp_ccp = [mock.call(1, 2, '4'), mock.call(7, 77, '88'), mock.call(7,", "runner from .context import test_utils as tu class Subsets_ExperimentTestCase(unittest.TestCase): def setUp(self): config_obj =", "subsets = [(1, 2, '4'), (7, 77, '88'), (7, 88, '169')] self.test_obj.single_run =", "(3000, 4000, '3')] self.assertTrue(len(result) == 4) self.assertTrue(result == exp) def test_run_experiment(self): subsets =", "(2000, 3000, '2'), (3000, 4000, '3')] self.assertTrue(len(result) == 4) self.assertTrue(result == exp) def", "self.test_obj.runner_obj.run_independent = mock.Mock() self.test_obj.runner_obj.run_independent.return_value = ('v', 't') self.test_obj.change_config_rel_op = mock.Mock() self.test_obj.runner_obj.run_relational = mock.Mock()", "'t')] self.test_obj.runner_obj.run_independent.assert_called_with() self.assertTrue(self.test_obj.change_config_rel_op.call_args_list == exp_ccro) self.assertTrue(self.test_obj.runner_obj.run_relational.call_args_list == exp_rel) self.test_obj.runner_obj.run_evaluation.assert_called_with('t') def test_change_config_parameters(self): self.test_obj.change_config_parameters(2, 4,", "module. 
\"\"\" import mock import unittest from .context import subsets_exp from .context import", "mock.call(7, 88, '169')] self.assertTrue(self.test_obj.single_run.call_count == 3) self.assertTrue(self.test_obj.change_config_parameters.call_args_list == exp_ccp) def test_single_run(self): self.test_obj.runner_obj.run_independent =", "'0' result = self.test_obj.divide_data_into_subsets(num_subsets=4) exp = [(0, 1000, '0'), (1000, 2000, '1'), (2000,", "self.test_obj.runner_obj.run_evaluation.assert_called_with('t') def test_change_config_parameters(self): self.test_obj.change_config_parameters(2, 4, '69') self.assertTrue(self.test_obj.config_obj.start == 2) self.assertTrue(self.test_obj.config_obj.end == 4) self.assertTrue(self.test_obj.config_obj.fold", "= [(0, 1000, '0'), (1000, 2000, '1'), (2000, 3000, '2'), (3000, 4000, '3')]", "mock.Mock() self.test_obj.runner_obj.run_independent.return_value = ('v', 't') self.test_obj.change_config_rel_op = mock.Mock() self.test_obj.runner_obj.run_relational = mock.Mock() self.test_obj.runner_obj.run_evaluation =", "mock.Mock() self.test_obj.runner_obj.run_relational = mock.Mock() self.test_obj.runner_obj.run_evaluation = mock.Mock() self.test_obj.single_run() exp_ccro = [mock.call(train=True), mock.call(train=False)] exp_rel", "'t') self.test_obj.change_config_rel_op = mock.Mock() self.test_obj.runner_obj.run_relational = mock.Mock() self.test_obj.runner_obj.run_evaluation = mock.Mock() self.test_obj.single_run() exp_ccro =", "4) self.assertTrue(self.test_obj.config_obj.fold == '69') def test_change_config_rel_op(self): self.test_obj.change_config_rel_op(train=False) self.assertTrue(self.test_obj.config_obj.infer) def test_suite(): suite = unittest.TestLoader().loadTestsFromTestCase(", "unittest from .context import subsets_exp from .context import config from .context import runner", "'4'), (7, 77, '88'), (7, 88, '169')] self.test_obj.single_run = mock.Mock() self.test_obj.change_config_parameters = mock.Mock()", "== 4) 
self.assertTrue(result == exp) def test_run_experiment(self): subsets = [(1, 2, '4'), (7,", "self.test_obj.single_run() exp_ccro = [mock.call(train=True), mock.call(train=False)] exp_rel = [mock.call('v', 't'), mock.call('v', 't')] self.test_obj.runner_obj.run_independent.assert_called_with() self.assertTrue(self.test_obj.change_config_rel_op.call_args_list", "== '69') def test_change_config_rel_op(self): self.test_obj.change_config_rel_op(train=False) self.assertTrue(self.test_obj.config_obj.infer) def test_suite(): suite = unittest.TestLoader().loadTestsFromTestCase( Subsets_ExperimentTestCase) return", "= 4000 self.test_obj.config_obj.start = 0 self.test_obj.config_obj.fold = '0' result = self.test_obj.divide_data_into_subsets(num_subsets=4) exp =", "\"\"\" Tests the subsets_exp module. \"\"\" import mock import unittest from .context import", "from .context import test_utils as tu class Subsets_ExperimentTestCase(unittest.TestCase): def setUp(self): config_obj = tu.sample_config()", "tu.sample_config() mock_runner_obj = mock.Mock(runner.Runner) self.test_obj = subsets_exp.Subsets_Experiment(config_obj, mock_runner_obj) def tearDown(self): self.test_obj = None", "= mock.Mock() self.test_obj.runner_obj.run_relational = mock.Mock() self.test_obj.runner_obj.run_evaluation = mock.Mock() self.test_obj.single_run() exp_ccro = [mock.call(train=True), mock.call(train=False)]", "'1'), (2000, 3000, '2'), (3000, 4000, '3')] self.assertTrue(len(result) == 4) self.assertTrue(result == exp)", "self.test_obj.change_config_parameters(2, 4, '69') self.assertTrue(self.test_obj.config_obj.start == 2) self.assertTrue(self.test_obj.config_obj.end == 4) self.assertTrue(self.test_obj.config_obj.fold == '69') def", "config from .context import runner from .context import test_utils as tu class Subsets_ExperimentTestCase(unittest.TestCase):", "[mock.call('v', 't'), mock.call('v', 't')] self.test_obj.runner_obj.run_independent.assert_called_with() 
self.assertTrue(self.test_obj.change_config_rel_op.call_args_list == exp_ccro) self.assertTrue(self.test_obj.runner_obj.run_relational.call_args_list == exp_rel) self.test_obj.runner_obj.run_evaluation.assert_called_with('t') def", "def test_change_config_rel_op(self): self.test_obj.change_config_rel_op(train=False) self.assertTrue(self.test_obj.config_obj.infer) def test_suite(): suite = unittest.TestLoader().loadTestsFromTestCase( Subsets_ExperimentTestCase) return suite if", "exp_ccp) def test_single_run(self): self.test_obj.runner_obj.run_independent = mock.Mock() self.test_obj.runner_obj.run_independent.return_value = ('v', 't') self.test_obj.change_config_rel_op = mock.Mock()", "runner.Runner)) self.assertTrue(test_obj.config_obj.modified) self.assertTrue(test_obj.config_obj.pseudo) def test_divide_data_into_subsets(self): self.test_obj.config_obj.end = 4000 self.test_obj.config_obj.start = 0 self.test_obj.config_obj.fold =", "import unittest from .context import subsets_exp from .context import config from .context import", "77, '88'), mock.call(7, 88, '169')] self.assertTrue(self.test_obj.single_run.call_count == 3) self.assertTrue(self.test_obj.change_config_parameters.call_args_list == exp_ccp) def test_single_run(self):", "mock.Mock() self.test_obj.run_experiment(subsets) exp_ccp = [mock.call(1, 2, '4'), mock.call(7, 77, '88'), mock.call(7, 88, '169')]", "'69') def test_change_config_rel_op(self): self.test_obj.change_config_rel_op(train=False) self.assertTrue(self.test_obj.config_obj.infer) def test_suite(): suite = unittest.TestLoader().loadTestsFromTestCase( Subsets_ExperimentTestCase) return suite", "= 0 self.test_obj.config_obj.fold = '0' result = self.test_obj.divide_data_into_subsets(num_subsets=4) exp = [(0, 1000, '0'),", "mock_runner_obj = mock.Mock(runner.Runner) self.test_obj = subsets_exp.Subsets_Experiment(config_obj, mock_runner_obj) def tearDown(self): self.test_obj = None def", "= subsets_exp.Subsets_Experiment(config_obj, 
mock_runner_obj) def tearDown(self): self.test_obj = None def test_init(self): # setup test_obj", "2) self.assertTrue(self.test_obj.config_obj.end == 4) self.assertTrue(self.test_obj.config_obj.fold == '69') def test_change_config_rel_op(self): self.test_obj.change_config_rel_op(train=False) self.assertTrue(self.test_obj.config_obj.infer) def test_suite():", "(1000, 2000, '1'), (2000, 3000, '2'), (3000, 4000, '3')] self.assertTrue(len(result) == 4) self.assertTrue(result", "('v', 't') self.test_obj.change_config_rel_op = mock.Mock() self.test_obj.runner_obj.run_relational = mock.Mock() self.test_obj.runner_obj.run_evaluation = mock.Mock() self.test_obj.single_run() exp_ccro", "= '0' result = self.test_obj.divide_data_into_subsets(num_subsets=4) exp = [(0, 1000, '0'), (1000, 2000, '1'),", "2, '4'), (7, 77, '88'), (7, 88, '169')] self.test_obj.single_run = mock.Mock() self.test_obj.change_config_parameters =", "mock.Mock() self.test_obj.single_run() exp_ccro = [mock.call(train=True), mock.call(train=False)] exp_rel = [mock.call('v', 't'), mock.call('v', 't')] self.test_obj.runner_obj.run_independent.assert_called_with()", "self.assertTrue(self.test_obj.config_obj.start == 2) self.assertTrue(self.test_obj.config_obj.end == 4) self.assertTrue(self.test_obj.config_obj.fold == '69') def test_change_config_rel_op(self): self.test_obj.change_config_rel_op(train=False) self.assertTrue(self.test_obj.config_obj.infer)", "test_change_config_rel_op(self): self.test_obj.change_config_rel_op(train=False) self.assertTrue(self.test_obj.config_obj.infer) def test_suite(): suite = unittest.TestLoader().loadTestsFromTestCase( Subsets_ExperimentTestCase) return suite if __name__", "self.assertTrue(self.test_obj.config_obj.end == 4) self.assertTrue(self.test_obj.config_obj.fold == '69') def test_change_config_rel_op(self): self.test_obj.change_config_rel_op(train=False) self.assertTrue(self.test_obj.config_obj.infer) def test_suite(): suite", "self.test_obj = 
subsets_exp.Subsets_Experiment(config_obj, mock_runner_obj) def tearDown(self): self.test_obj = None def test_init(self): # setup", "self.assertTrue(self.test_obj.runner_obj.run_relational.call_args_list == exp_rel) self.test_obj.runner_obj.run_evaluation.assert_called_with('t') def test_change_config_parameters(self): self.test_obj.change_config_parameters(2, 4, '69') self.assertTrue(self.test_obj.config_obj.start == 2) self.assertTrue(self.test_obj.config_obj.end", "exp = [(0, 1000, '0'), (1000, 2000, '1'), (2000, 3000, '2'), (3000, 4000,", "exp_rel = [mock.call('v', 't'), mock.call('v', 't')] self.test_obj.runner_obj.run_independent.assert_called_with() self.assertTrue(self.test_obj.change_config_rel_op.call_args_list == exp_ccro) self.assertTrue(self.test_obj.runner_obj.run_relational.call_args_list == exp_rel)", "class Subsets_ExperimentTestCase(unittest.TestCase): def setUp(self): config_obj = tu.sample_config() mock_runner_obj = mock.Mock(runner.Runner) self.test_obj = subsets_exp.Subsets_Experiment(config_obj,", "self.assertTrue(test_obj.config_obj.modified) self.assertTrue(test_obj.config_obj.pseudo) def test_divide_data_into_subsets(self): self.test_obj.config_obj.end = 4000 self.test_obj.config_obj.start = 0 self.test_obj.config_obj.fold = '0'", "self.assertTrue(result == exp) def test_run_experiment(self): subsets = [(1, 2, '4'), (7, 77, '88'),", "self.test_obj.change_config_rel_op = mock.Mock() self.test_obj.runner_obj.run_relational = mock.Mock() self.test_obj.runner_obj.run_evaluation = mock.Mock() self.test_obj.single_run() exp_ccro = [mock.call(train=True),", "test_run_experiment(self): subsets = [(1, 2, '4'), (7, 77, '88'), (7, 88, '169')] self.test_obj.single_run", "test_init(self): # setup test_obj = self.test_obj # assert self.assertTrue(isinstance(test_obj.config_obj, config.Config)) self.assertTrue(isinstance(test_obj.runner_obj, runner.Runner)) self.assertTrue(test_obj.config_obj.modified)", "3) 
self.assertTrue(self.test_obj.change_config_parameters.call_args_list == exp_ccp) def test_single_run(self): self.test_obj.runner_obj.run_independent = mock.Mock() self.test_obj.runner_obj.run_independent.return_value = ('v', 't')", "test_obj = self.test_obj # assert self.assertTrue(isinstance(test_obj.config_obj, config.Config)) self.assertTrue(isinstance(test_obj.runner_obj, runner.Runner)) self.assertTrue(test_obj.config_obj.modified) self.assertTrue(test_obj.config_obj.pseudo) def test_divide_data_into_subsets(self):", "self.test_obj = None def test_init(self): # setup test_obj = self.test_obj # assert self.assertTrue(isinstance(test_obj.config_obj,", "self.assertTrue(isinstance(test_obj.runner_obj, runner.Runner)) self.assertTrue(test_obj.config_obj.modified) self.assertTrue(test_obj.config_obj.pseudo) def test_divide_data_into_subsets(self): self.test_obj.config_obj.end = 4000 self.test_obj.config_obj.start = 0 self.test_obj.config_obj.fold", "self.test_obj.runner_obj.run_independent.assert_called_with() self.assertTrue(self.test_obj.change_config_rel_op.call_args_list == exp_ccro) self.assertTrue(self.test_obj.runner_obj.run_relational.call_args_list == exp_rel) self.test_obj.runner_obj.run_evaluation.assert_called_with('t') def test_change_config_parameters(self): self.test_obj.change_config_parameters(2, 4, '69')", "as tu class Subsets_ExperimentTestCase(unittest.TestCase): def setUp(self): config_obj = tu.sample_config() mock_runner_obj = mock.Mock(runner.Runner) self.test_obj", "== 2) self.assertTrue(self.test_obj.config_obj.end == 4) self.assertTrue(self.test_obj.config_obj.fold == '69') def test_change_config_rel_op(self): self.test_obj.change_config_rel_op(train=False) self.assertTrue(self.test_obj.config_obj.infer) def", "result = self.test_obj.divide_data_into_subsets(num_subsets=4) exp = [(0, 1000, '0'), (1000, 2000, '1'), (2000, 3000,", "self.test_obj.runner_obj.run_relational = mock.Mock() self.test_obj.runner_obj.run_evaluation = mock.Mock() 
self.test_obj.single_run() exp_ccro = [mock.call(train=True), mock.call(train=False)] exp_rel =", "'3')] self.assertTrue(len(result) == 4) self.assertTrue(result == exp) def test_run_experiment(self): subsets = [(1, 2,", "self.assertTrue(isinstance(test_obj.config_obj, config.Config)) self.assertTrue(isinstance(test_obj.runner_obj, runner.Runner)) self.assertTrue(test_obj.config_obj.modified) self.assertTrue(test_obj.config_obj.pseudo) def test_divide_data_into_subsets(self): self.test_obj.config_obj.end = 4000 self.test_obj.config_obj.start =", "'2'), (3000, 4000, '3')] self.assertTrue(len(result) == 4) self.assertTrue(result == exp) def test_run_experiment(self): subsets", "'88'), mock.call(7, 88, '169')] self.assertTrue(self.test_obj.single_run.call_count == 3) self.assertTrue(self.test_obj.change_config_parameters.call_args_list == exp_ccp) def test_single_run(self): self.test_obj.runner_obj.run_independent", "= mock.Mock() self.test_obj.run_experiment(subsets) exp_ccp = [mock.call(1, 2, '4'), mock.call(7, 77, '88'), mock.call(7, 88,", "mock import unittest from .context import subsets_exp from .context import config from .context", "= ('v', 't') self.test_obj.change_config_rel_op = mock.Mock() self.test_obj.runner_obj.run_relational = mock.Mock() self.test_obj.runner_obj.run_evaluation = mock.Mock() self.test_obj.single_run()", "4, '69') self.assertTrue(self.test_obj.config_obj.start == 2) self.assertTrue(self.test_obj.config_obj.end == 4) self.assertTrue(self.test_obj.config_obj.fold == '69') def test_change_config_rel_op(self):", ".context import runner from .context import test_utils as tu class Subsets_ExperimentTestCase(unittest.TestCase): def setUp(self):", "88, '169')] self.test_obj.single_run = mock.Mock() self.test_obj.change_config_parameters = mock.Mock() self.test_obj.run_experiment(subsets) exp_ccp = [mock.call(1, 2,", "exp_ccro) self.assertTrue(self.test_obj.runner_obj.run_relational.call_args_list == exp_rel) 
self.test_obj.runner_obj.run_evaluation.assert_called_with('t') def test_change_config_parameters(self): self.test_obj.change_config_parameters(2, 4, '69') self.assertTrue(self.test_obj.config_obj.start == 2)", "self.assertTrue(self.test_obj.change_config_rel_op.call_args_list == exp_ccro) self.assertTrue(self.test_obj.runner_obj.run_relational.call_args_list == exp_rel) self.test_obj.runner_obj.run_evaluation.assert_called_with('t') def test_change_config_parameters(self): self.test_obj.change_config_parameters(2, 4, '69') self.assertTrue(self.test_obj.config_obj.start", "'169')] self.test_obj.single_run = mock.Mock() self.test_obj.change_config_parameters = mock.Mock() self.test_obj.run_experiment(subsets) exp_ccp = [mock.call(1, 2, '4')," ]
[ "min_price)) print(\"time: %s\\nprice: %d\\n\" % (date_now, price)) if max_price == 0 and min_price", "max_price == 0 and min_price == 0: return if price >= max_price: for", "time.sleep(0.2) print(\"warn!!! max price: %d\\a\\n\" % price) elif price <= min_price: for _", "min_price=0): resp = requests.get(\"https://api.coinone.co.kr/trades/?currency=eth\") result = resp.json() order = result[\"completeOrders\"][-1] price = int(order[\"price\"])", "print(\"time: %s\\nprice: %d\\n\" % (date_now, price)) if max_price == 0 and min_price ==", "price)) if max_price == 0 and min_price == 0: return if price >=", "= result[\"completeOrders\"][-1] price = int(order[\"price\"]) date_now = datetime.fromtimestamp(int(order[\"timestamp\"])) print(\"max_limit: %d\\nmin_limit: %d\\n\" % (max_price,", "if __name__ == \"__main__\": max_price = int(sys.argv[1]) min_price = int(sys.argv[2]) while True: time.sleep(5)", "import datetime def watch_price(max_price=0, min_price=0): resp = requests.get(\"https://api.coinone.co.kr/trades/?currency=eth\") result = resp.json() order =", "if max_price == 0 and min_price == 0: return if price >= max_price:", "== \"__main__\": max_price = int(sys.argv[1]) min_price = int(sys.argv[2]) while True: time.sleep(5) watch_price(max_price, min_price)", "sys import time from datetime import datetime def watch_price(max_price=0, min_price=0): resp = requests.get(\"https://api.coinone.co.kr/trades/?currency=eth\")", "0 and min_price == 0: return if price >= max_price: for _ in", "for _ in range(3): time.sleep(0.2) print(\"warn!!! 
max price: %d\\a\\n\" % price) elif price", "= datetime.fromtimestamp(int(order[\"timestamp\"])) print(\"max_limit: %d\\nmin_limit: %d\\n\" % (max_price, min_price)) print(\"time: %s\\nprice: %d\\n\" % (date_now,", "time from datetime import datetime def watch_price(max_price=0, min_price=0): resp = requests.get(\"https://api.coinone.co.kr/trades/?currency=eth\") result =", "def watch_price(max_price=0, min_price=0): resp = requests.get(\"https://api.coinone.co.kr/trades/?currency=eth\") result = resp.json() order = result[\"completeOrders\"][-1] price", "from datetime import datetime def watch_price(max_price=0, min_price=0): resp = requests.get(\"https://api.coinone.co.kr/trades/?currency=eth\") result = resp.json()", "% (max_price, min_price)) print(\"time: %s\\nprice: %d\\n\" % (date_now, price)) if max_price == 0", "% price) if __name__ == \"__main__\": max_price = int(sys.argv[1]) min_price = int(sys.argv[2]) while", "int(order[\"price\"]) date_now = datetime.fromtimestamp(int(order[\"timestamp\"])) print(\"max_limit: %d\\nmin_limit: %d\\n\" % (max_price, min_price)) print(\"time: %s\\nprice: %d\\n\"", "price: %d\\a\\n\" % price) elif price <= min_price: for _ in range(5): time.sleep(0.2)", "for _ in range(5): time.sleep(0.2) print(\"warn!!! min price: %d\\a\\n\" % price) if __name__", "0: return if price >= max_price: for _ in range(3): time.sleep(0.2) print(\"warn!!! max", "max price: %d\\a\\n\" % price) elif price <= min_price: for _ in range(5):", "min_price == 0: return if price >= max_price: for _ in range(3): time.sleep(0.2)", "%d\\n\" % (max_price, min_price)) print(\"time: %s\\nprice: %d\\n\" % (date_now, price)) if max_price ==", "price >= max_price: for _ in range(3): time.sleep(0.2) print(\"warn!!! max price: %d\\a\\n\" %", "price) if __name__ == \"__main__\": max_price = int(sys.argv[1]) min_price = int(sys.argv[2]) while True:", "in range(3): time.sleep(0.2) print(\"warn!!! 
max price: %d\\a\\n\" % price) elif price <= min_price:", "requests import sys import time from datetime import datetime def watch_price(max_price=0, min_price=0): resp", "elif price <= min_price: for _ in range(5): time.sleep(0.2) print(\"warn!!! min price: %d\\a\\n\"", "result = resp.json() order = result[\"completeOrders\"][-1] price = int(order[\"price\"]) date_now = datetime.fromtimestamp(int(order[\"timestamp\"])) print(\"max_limit:", "date_now = datetime.fromtimestamp(int(order[\"timestamp\"])) print(\"max_limit: %d\\nmin_limit: %d\\n\" % (max_price, min_price)) print(\"time: %s\\nprice: %d\\n\" %", "order = result[\"completeOrders\"][-1] price = int(order[\"price\"]) date_now = datetime.fromtimestamp(int(order[\"timestamp\"])) print(\"max_limit: %d\\nmin_limit: %d\\n\" %", "price) elif price <= min_price: for _ in range(5): time.sleep(0.2) print(\"warn!!! min price:", "min price: %d\\a\\n\" % price) if __name__ == \"__main__\": max_price = int(sys.argv[1]) min_price", "in range(5): time.sleep(0.2) print(\"warn!!! min price: %d\\a\\n\" % price) if __name__ == \"__main__\":", "<= min_price: for _ in range(5): time.sleep(0.2) print(\"warn!!! min price: %d\\a\\n\" % price)", "= int(order[\"price\"]) date_now = datetime.fromtimestamp(int(order[\"timestamp\"])) print(\"max_limit: %d\\nmin_limit: %d\\n\" % (max_price, min_price)) print(\"time: %s\\nprice:", "(date_now, price)) if max_price == 0 and min_price == 0: return if price", "datetime import datetime def watch_price(max_price=0, min_price=0): resp = requests.get(\"https://api.coinone.co.kr/trades/?currency=eth\") result = resp.json() order", "return if price >= max_price: for _ in range(3): time.sleep(0.2) print(\"warn!!! max price:", "price <= min_price: for _ in range(5): time.sleep(0.2) print(\"warn!!! 
min price: %d\\a\\n\" %", "price = int(order[\"price\"]) date_now = datetime.fromtimestamp(int(order[\"timestamp\"])) print(\"max_limit: %d\\nmin_limit: %d\\n\" % (max_price, min_price)) print(\"time:", "import time from datetime import datetime def watch_price(max_price=0, min_price=0): resp = requests.get(\"https://api.coinone.co.kr/trades/?currency=eth\") result", ">= max_price: for _ in range(3): time.sleep(0.2) print(\"warn!!! max price: %d\\a\\n\" % price)", "== 0: return if price >= max_price: for _ in range(3): time.sleep(0.2) print(\"warn!!!", "import requests import sys import time from datetime import datetime def watch_price(max_price=0, min_price=0):", "requests.get(\"https://api.coinone.co.kr/trades/?currency=eth\") result = resp.json() order = result[\"completeOrders\"][-1] price = int(order[\"price\"]) date_now = datetime.fromtimestamp(int(order[\"timestamp\"]))", "== 0 and min_price == 0: return if price >= max_price: for _", "%d\\n\" % (date_now, price)) if max_price == 0 and min_price == 0: return", "%s\\nprice: %d\\n\" % (date_now, price)) if max_price == 0 and min_price == 0:", "resp.json() order = result[\"completeOrders\"][-1] price = int(order[\"price\"]) date_now = datetime.fromtimestamp(int(order[\"timestamp\"])) print(\"max_limit: %d\\nmin_limit: %d\\n\"", "datetime def watch_price(max_price=0, min_price=0): resp = requests.get(\"https://api.coinone.co.kr/trades/?currency=eth\") result = resp.json() order = result[\"completeOrders\"][-1]", "_ in range(5): time.sleep(0.2) print(\"warn!!! min price: %d\\a\\n\" % price) if __name__ ==", "_ in range(3): time.sleep(0.2) print(\"warn!!! max price: %d\\a\\n\" % price) elif price <=", "range(5): time.sleep(0.2) print(\"warn!!! min price: %d\\a\\n\" % price) if __name__ == \"__main__\": max_price", "max_price: for _ in range(3): time.sleep(0.2) print(\"warn!!! 
max price: %d\\a\\n\" % price) elif", "%d\\nmin_limit: %d\\n\" % (max_price, min_price)) print(\"time: %s\\nprice: %d\\n\" % (date_now, price)) if max_price", "<reponame>usjeong/coining-monitor<filename>order.py import requests import sys import time from datetime import datetime def watch_price(max_price=0,", "print(\"warn!!! min price: %d\\a\\n\" % price) if __name__ == \"__main__\": max_price = int(sys.argv[1])", "= requests.get(\"https://api.coinone.co.kr/trades/?currency=eth\") result = resp.json() order = result[\"completeOrders\"][-1] price = int(order[\"price\"]) date_now =", "datetime.fromtimestamp(int(order[\"timestamp\"])) print(\"max_limit: %d\\nmin_limit: %d\\n\" % (max_price, min_price)) print(\"time: %s\\nprice: %d\\n\" % (date_now, price))", "min_price: for _ in range(5): time.sleep(0.2) print(\"warn!!! min price: %d\\a\\n\" % price) if", "% (date_now, price)) if max_price == 0 and min_price == 0: return if", "print(\"max_limit: %d\\nmin_limit: %d\\n\" % (max_price, min_price)) print(\"time: %s\\nprice: %d\\n\" % (date_now, price)) if", "result[\"completeOrders\"][-1] price = int(order[\"price\"]) date_now = datetime.fromtimestamp(int(order[\"timestamp\"])) print(\"max_limit: %d\\nmin_limit: %d\\n\" % (max_price, min_price))", "if price >= max_price: for _ in range(3): time.sleep(0.2) print(\"warn!!! max price: %d\\a\\n\"", "price: %d\\a\\n\" % price) if __name__ == \"__main__\": max_price = int(sys.argv[1]) min_price =", "resp = requests.get(\"https://api.coinone.co.kr/trades/?currency=eth\") result = resp.json() order = result[\"completeOrders\"][-1] price = int(order[\"price\"]) date_now", "import sys import time from datetime import datetime def watch_price(max_price=0, min_price=0): resp =", "%d\\a\\n\" % price) elif price <= min_price: for _ in range(5): time.sleep(0.2) print(\"warn!!!", "time.sleep(0.2) print(\"warn!!! min price: %d\\a\\n\" % price) if __name__ == \"__main__\": max_price =", "print(\"warn!!! 
max price: %d\\a\\n\" % price) elif price <= min_price: for _ in", "(max_price, min_price)) print(\"time: %s\\nprice: %d\\n\" % (date_now, price)) if max_price == 0 and", "__name__ == \"__main__\": max_price = int(sys.argv[1]) min_price = int(sys.argv[2]) while True: time.sleep(5) watch_price(max_price,", "watch_price(max_price=0, min_price=0): resp = requests.get(\"https://api.coinone.co.kr/trades/?currency=eth\") result = resp.json() order = result[\"completeOrders\"][-1] price =", "%d\\a\\n\" % price) if __name__ == \"__main__\": max_price = int(sys.argv[1]) min_price = int(sys.argv[2])", "% price) elif price <= min_price: for _ in range(5): time.sleep(0.2) print(\"warn!!! min", "= resp.json() order = result[\"completeOrders\"][-1] price = int(order[\"price\"]) date_now = datetime.fromtimestamp(int(order[\"timestamp\"])) print(\"max_limit: %d\\nmin_limit:", "and min_price == 0: return if price >= max_price: for _ in range(3):", "range(3): time.sleep(0.2) print(\"warn!!! max price: %d\\a\\n\" % price) elif price <= min_price: for" ]
[ "time import moviepy.editor import pygame from blessed import Terminal from PIL import Image,", "import os import sys import time import moviepy.editor import pygame from blessed import", "= im.load() res = '' for y in range(im.size[1] // 2): for x", "like this for some reason # noinspection PyUnresolvedReferences r, g, b = pixels[x,", "= pixels[x, y * 2] # noinspection PyUnresolvedReferences r2, g2, b2 = pixels[x,", "\"Actual frame: {} | \" \"Theoretical frame: {} | \" \"Dropped frames: {}", "ImageOps import cv2 term = Terminal() HALF = '\\N{LOWER HALF BLOCK}' def image(im):", "{}\".format( elapsed, frame_count - dropped_frames, expected_frame, dropped_frames, (frame_count - dropped_frames) / elapsed )", "\" \"FPS: {}\".format( elapsed, frame_count - dropped_frames, expected_frame, dropped_frames, (frame_count - dropped_frames) /", "term.fullscreen(): # get start time start = time.time() # variables frame_count = 1", "Image.fromarray(img) sys.stdout.write(term.home + image(im)) sys.stdout.write( term.white_on_black + \"Elapsed time: {} | \" \"Actual", "or %s or %s to exit.' 
% ( term.italic(term.bold(\"Space\")) + term.normal, term.italic(term.bold(\"Escape\")) +", "moviepy.editor import pygame from blessed import Terminal from PIL import Image, ImageOps import", "# load video capture = cv2.VideoCapture(path) # get fps fps = capture.get(cv2.CAP_PROP_FPS) #", "for y in range(im.size[1] // 2): for x in range(im.size[0]): # false positives,", "pixels = im.load() res = '' for y in range(im.size[1] // 2): for", "sys.stdout.write(term.home + image(im)) sys.stdout.write( term.white_on_black + \"Elapsed time: {} | \" \"Actual frame:", "get fps fps = capture.get(cv2.CAP_PROP_FPS) # load audio from video v = moviepy.editor.VideoFileClip(path)", "img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) im = Image.fromarray(img) sys.stdout.write(term.home + image(im)) sys.stdout.write( term.white_on_black +", "while capture.isOpened(): # for pause/exit inp = term.inkey(timeout=0.01) # esc if inp ==", "load video capture = cv2.VideoCapture(path) # get fps fps = capture.get(cv2.CAP_PROP_FPS) # load", "first: pygame.mixer.music.play() first = False ret, frame = capture.read() elapsed = time.time() -", "sys.stdout.write( term.white_on_black + \"Elapsed time: {} | \" \"Actual frame: {} | \"", "pygame.mixer.music.load(path.split(\".\")[0] + \".wav\") pause = False first = True # main loop while", "< expected_frame: frame_count += 1 dropped_frames += 1 continue if not ret: break", "moviepy.editor.VideoFileClip(path) audio = v.audio audio.write_audiofile(path.split(\".\")[0] + \".wav\") # play audio pygame.mixer.init() pygame.mixer.music.load(path.split(\".\")[0] +", "or inp == \"q\": break if inp == ' ': pause = not", "pause = False first = True # main loop while capture.isOpened(): # for", "b2) + HALF return res def video(path): with term.cbreak(), term.hidden_cursor(), term.fullscreen(): # get", "first = False ret, frame = capture.read() elapsed = time.time() - start expected_frame", "cv2.VideoCapture(path) # get fps fps = capture.get(cv2.CAP_PROP_FPS) # load 
audio from video v", "image(im): im = ImageOps.fit(im, (term.width, term.height * 2)) pixels = im.load() res =", "return res def video(path): with term.cbreak(), term.hidden_cursor(), term.fullscreen(): # get start time start", "term.inkey(timeout=0.01) # esc if inp == \"\\x1b\" or inp == \"q\": break if", "= cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) im = Image.fromarray(img) sys.stdout.write(term.home + image(im)) sys.stdout.write( term.white_on_black + \"Elapsed", ") ) ) if not pause: if first: pygame.mixer.music.play() first = False ret,", "\" \"Actual frame: {} | \" \"Theoretical frame: {} | \" \"Dropped frames:", "not ret: break frame_count += 1 img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) im = Image.fromarray(img)", "expected_frame = int(elapsed * fps) if frame_count < expected_frame: frame_count += 1 dropped_frames", "+ 1] res += term.on_color_rgb(r, g, b) + term.color_rgb(r2, g2, b2) + HALF", "= False first = True # main loop while capture.isOpened(): # for pause/exit", "= Image.fromarray(img) sys.stdout.write(term.home + image(im)) sys.stdout.write( term.white_on_black + \"Elapsed time: {} | \"", "if inp == \"\\x1b\" or inp == \"q\": break if inp == '", "not pause pygame.mixer.music.pause() if pause else pygame.mixer.music.unpause() print(term.home + term.move_y((term.height - 1) //", "cv2.COLOR_BGR2RGB) im = Image.fromarray(img) sys.stdout.write(term.home + image(im)) sys.stdout.write( term.white_on_black + \"Elapsed time: {}", "1 dropped_frames = 0 # load video capture = cv2.VideoCapture(path) # get fps", "false positives, pycharm doesn't like this for some reason # noinspection PyUnresolvedReferences r,", "HALF BLOCK}' def image(im): im = ImageOps.fit(im, (term.width, term.height * 2)) pixels =", "+ term.normal, term.italic(term.bold(\"Escape\")) + term.normal, term.italic(term.bold(\"Q\")) + term.normal ) ) ) ) if", "* 2 + 1] res += term.on_color_rgb(r, g, b) + term.color_rgb(r2, g2, b2)", "y * 2] # noinspection PyUnresolvedReferences r2, g2, b2 = 
pixels[x, y *", "+ \".wav\") pause = False first = True # main loop while capture.isOpened():", "term.color_rgb(r2, g2, b2) + HALF return res def video(path): with term.cbreak(), term.hidden_cursor(), term.fullscreen():", "pygame from blessed import Terminal from PIL import Image, ImageOps import cv2 term", "expected_frame: frame_count += 1 dropped_frames += 1 continue if not ret: break frame_count", "range(im.size[1] // 2): for x in range(im.size[0]): # false positives, pycharm doesn't like", "video v = moviepy.editor.VideoFileClip(path) audio = v.audio audio.write_audiofile(path.split(\".\")[0] + \".wav\") # play audio", "res = '' for y in range(im.size[1] // 2): for x in range(im.size[0]):", "blessed import Terminal from PIL import Image, ImageOps import cv2 term = Terminal()", "2)) print( term.black_on_white( term.center( 'Paused. Press %s to unpause, or %s or %s", "start = time.time() # variables frame_count = 1 dropped_frames = 0 # load", "time: {} | \" \"Actual frame: {} | \" \"Theoretical frame: {} |", "# main loop while capture.isOpened(): # for pause/exit inp = term.inkey(timeout=0.01) # esc", "in range(im.size[1] // 2): for x in range(im.size[0]): # false positives, pycharm doesn't", "\" \"Dropped frames: {} | \" \"FPS: {}\".format( elapsed, frame_count - dropped_frames, expected_frame,", "1 continue if not ret: break frame_count += 1 img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)", "2): for x in range(im.size[0]): # false positives, pycharm doesn't like this for", ") ) ) ) if not pause: if first: pygame.mixer.music.play() first = False", "for pause/exit inp = term.inkey(timeout=0.01) # esc if inp == \"\\x1b\" or inp", "term.black_on_white( term.center( 'Paused. 
Press %s to unpause, or %s or %s to exit.'", "if inp == ' ': pause = not pause pygame.mixer.music.pause() if pause else", "if first: pygame.mixer.music.play() first = False ret, frame = capture.read() elapsed = time.time()", "| \" \"Actual frame: {} | \" \"Theoretical frame: {} | \" \"Dropped", "dropped_frames = 0 # load video capture = cv2.VideoCapture(path) # get fps fps", "pause/exit inp = term.inkey(timeout=0.01) # esc if inp == \"\\x1b\" or inp ==", "frame = capture.read() elapsed = time.time() - start expected_frame = int(elapsed * fps)", "= True # main loop while capture.isOpened(): # for pause/exit inp = term.inkey(timeout=0.01)", "im = Image.fromarray(img) sys.stdout.write(term.home + image(im)) sys.stdout.write( term.white_on_black + \"Elapsed time: {} |", "Image, ImageOps import cv2 term = Terminal() HALF = '\\N{LOWER HALF BLOCK}' def", "| \" \"Dropped frames: {} | \" \"FPS: {}\".format( elapsed, frame_count - dropped_frames,", "# get start time start = time.time() # variables frame_count = 1 dropped_frames", "print( term.black_on_white( term.center( 'Paused. 
Press %s to unpause, or %s or %s to", "capture.get(cv2.CAP_PROP_FPS) # load audio from video v = moviepy.editor.VideoFileClip(path) audio = v.audio audio.write_audiofile(path.split(\".\")[0]", "import cv2 term = Terminal() HALF = '\\N{LOWER HALF BLOCK}' def image(im): im", "from PIL import Image, ImageOps import cv2 term = Terminal() HALF = '\\N{LOWER", "loop while capture.isOpened(): # for pause/exit inp = term.inkey(timeout=0.01) # esc if inp", "if pause else pygame.mixer.music.unpause() print(term.home + term.move_y((term.height - 1) // 2)) print( term.black_on_white(", "pycharm doesn't like this for some reason # noinspection PyUnresolvedReferences r, g, b", "pause = not pause pygame.mixer.music.pause() if pause else pygame.mixer.music.unpause() print(term.home + term.move_y((term.height -", "term.normal ) ) ) ) if not pause: if first: pygame.mixer.music.play() first =", "= term.inkey(timeout=0.01) # esc if inp == \"\\x1b\" or inp == \"q\": break", "frame_count += 1 img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) im = Image.fromarray(img) sys.stdout.write(term.home + image(im))", "pixels[x, y * 2] # noinspection PyUnresolvedReferences r2, g2, b2 = pixels[x, y", "noinspection PyUnresolvedReferences r2, g2, b2 = pixels[x, y * 2 + 1] res", "HALF return res def video(path): with term.cbreak(), term.hidden_cursor(), term.fullscreen(): # get start time", "Terminal() HALF = '\\N{LOWER HALF BLOCK}' def image(im): im = ImageOps.fit(im, (term.width, term.height", "== \"q\": break if inp == ' ': pause = not pause pygame.mixer.music.pause()", "term.italic(term.bold(\"Q\")) + term.normal ) ) ) ) if not pause: if first: pygame.mixer.music.play()", "g2, b2) + HALF return res def video(path): with term.cbreak(), term.hidden_cursor(), term.fullscreen(): #", "pygame.mixer.music.play() first = False ret, frame = capture.read() elapsed = time.time() - start", "\"Elapsed time: {} | \" \"Actual frame: {} | \" \"Theoretical frame: {}", "'' for y in range(im.size[1] // 2): for x in 
range(im.size[0]): # false", "%s to exit.' % ( term.italic(term.bold(\"Space\")) + term.normal, term.italic(term.bold(\"Escape\")) + term.normal, term.italic(term.bold(\"Q\")) +", ") ) if not pause: if first: pygame.mixer.music.play() first = False ret, frame", "b2 = pixels[x, y * 2 + 1] res += term.on_color_rgb(r, g, b)", "esc if inp == \"\\x1b\" or inp == \"q\": break if inp ==", "get start time start = time.time() # variables frame_count = 1 dropped_frames =", "+ \"Elapsed time: {} | \" \"Actual frame: {} | \" \"Theoretical frame:", "frame_count = 1 dropped_frames = 0 # load video capture = cv2.VideoCapture(path) #", "+ term.normal, term.italic(term.bold(\"Q\")) + term.normal ) ) ) ) if not pause: if", "continue if not ret: break frame_count += 1 img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) im", "* 2] # noinspection PyUnresolvedReferences r2, g2, b2 = pixels[x, y * 2", "variables frame_count = 1 dropped_frames = 0 # load video capture = cv2.VideoCapture(path)", "frame_count += 1 dropped_frames += 1 continue if not ret: break frame_count +=", "== ' ': pause = not pause pygame.mixer.music.pause() if pause else pygame.mixer.music.unpause() print(term.home", "v.audio audio.write_audiofile(path.split(\".\")[0] + \".wav\") # play audio pygame.mixer.init() pygame.mixer.music.load(path.split(\".\")[0] + \".wav\") pause =", "2)) pixels = im.load() res = '' for y in range(im.size[1] // 2):", "noinspection PyUnresolvedReferences r, g, b = pixels[x, y * 2] # noinspection PyUnresolvedReferences", "positives, pycharm doesn't like this for some reason # noinspection PyUnresolvedReferences r, g,", "- 1) // 2)) print( term.black_on_white( term.center( 'Paused. 
Press %s to unpause, or", "# for pause/exit inp = term.inkey(timeout=0.01) # esc if inp == \"\\x1b\" or", "': pause = not pause pygame.mixer.music.pause() if pause else pygame.mixer.music.unpause() print(term.home + term.move_y((term.height", "some reason # noinspection PyUnresolvedReferences r, g, b = pixels[x, y * 2]", "sys import time import moviepy.editor import pygame from blessed import Terminal from PIL", "term.normal, term.italic(term.bold(\"Q\")) + term.normal ) ) ) ) if not pause: if first:", "%s or %s to exit.' % ( term.italic(term.bold(\"Space\")) + term.normal, term.italic(term.bold(\"Escape\")) + term.normal,", "+ term.move_y((term.height - 1) // 2)) print( term.black_on_white( term.center( 'Paused. Press %s to", "ImageOps.fit(im, (term.width, term.height * 2)) pixels = im.load() res = '' for y", "with term.cbreak(), term.hidden_cursor(), term.fullscreen(): # get start time start = time.time() # variables", "capture = cv2.VideoCapture(path) # get fps fps = capture.get(cv2.CAP_PROP_FPS) # load audio from", "+ HALF return res def video(path): with term.cbreak(), term.hidden_cursor(), term.fullscreen(): # get start", "= moviepy.editor.VideoFileClip(path) audio = v.audio audio.write_audiofile(path.split(\".\")[0] + \".wav\") # play audio pygame.mixer.init() pygame.mixer.music.load(path.split(\".\")[0]", "g, b) + term.color_rgb(r2, g2, b2) + HALF return res def video(path): with", "= 1 dropped_frames = 0 # load video capture = cv2.VideoCapture(path) # get", "- start expected_frame = int(elapsed * fps) if frame_count < expected_frame: frame_count +=", "fps = capture.get(cv2.CAP_PROP_FPS) # load audio from video v = moviepy.editor.VideoFileClip(path) audio =", "v = moviepy.editor.VideoFileClip(path) audio = v.audio audio.write_audiofile(path.split(\".\")[0] + \".wav\") # play audio pygame.mixer.init()", "// 2): for x in range(im.size[0]): # false positives, pycharm doesn't like this", "inp = term.inkey(timeout=0.01) # esc if inp == \"\\x1b\" or inp == 
\"q\":", "start time start = time.time() # variables frame_count = 1 dropped_frames = 0", "= not pause pygame.mixer.music.pause() if pause else pygame.mixer.music.unpause() print(term.home + term.move_y((term.height - 1)", "ret, frame = capture.read() elapsed = time.time() - start expected_frame = int(elapsed *", "+ term.color_rgb(r2, g2, b2) + HALF return res def video(path): with term.cbreak(), term.hidden_cursor(),", "1 img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) im = Image.fromarray(img) sys.stdout.write(term.home + image(im)) sys.stdout.write( term.white_on_black", "\" \"Theoretical frame: {} | \" \"Dropped frames: {} | \" \"FPS: {}\".format(", "False first = True # main loop while capture.isOpened(): # for pause/exit inp", "# variables frame_count = 1 dropped_frames = 0 # load video capture =", "def video(path): with term.cbreak(), term.hidden_cursor(), term.fullscreen(): # get start time start = time.time()", "== \"\\x1b\" or inp == \"q\": break if inp == ' ': pause", "from video v = moviepy.editor.VideoFileClip(path) audio = v.audio audio.write_audiofile(path.split(\".\")[0] + \".wav\") # play", "audio.write_audiofile(path.split(\".\")[0] + \".wav\") # play audio pygame.mixer.init() pygame.mixer.music.load(path.split(\".\")[0] + \".wav\") pause = False", "b = pixels[x, y * 2] # noinspection PyUnresolvedReferences r2, g2, b2 =", "if frame_count < expected_frame: frame_count += 1 dropped_frames += 1 continue if not", "for x in range(im.size[0]): # false positives, pycharm doesn't like this for some", "+= 1 dropped_frames += 1 continue if not ret: break frame_count += 1", "= v.audio audio.write_audiofile(path.split(\".\")[0] + \".wav\") # play audio pygame.mixer.init() pygame.mixer.music.load(path.split(\".\")[0] + \".wav\") pause", "Press %s to unpause, or %s or %s to exit.' 
% ( term.italic(term.bold(\"Space\"))", "pixels[x, y * 2 + 1] res += term.on_color_rgb(r, g, b) + term.color_rgb(r2,", "{} | \" \"FPS: {}\".format( elapsed, frame_count - dropped_frames, expected_frame, dropped_frames, (frame_count -", "import pygame from blessed import Terminal from PIL import Image, ImageOps import cv2", "0 # load video capture = cv2.VideoCapture(path) # get fps fps = capture.get(cv2.CAP_PROP_FPS)", "import time import moviepy.editor import pygame from blessed import Terminal from PIL import", "(term.width, term.height * 2)) pixels = im.load() res = '' for y in", "term.center( 'Paused. Press %s to unpause, or %s or %s to exit.' %", "= False ret, frame = capture.read() elapsed = time.time() - start expected_frame =", "1 dropped_frames += 1 continue if not ret: break frame_count += 1 img", "image(im)) sys.stdout.write( term.white_on_black + \"Elapsed time: {} | \" \"Actual frame: {} |", "term.italic(term.bold(\"Escape\")) + term.normal, term.italic(term.bold(\"Q\")) + term.normal ) ) ) ) if not pause:", "or %s to exit.' % ( term.italic(term.bold(\"Space\")) + term.normal, term.italic(term.bold(\"Escape\")) + term.normal, term.italic(term.bold(\"Q\"))", "pygame.mixer.music.unpause() print(term.home + term.move_y((term.height - 1) // 2)) print( term.black_on_white( term.center( 'Paused. 
Press", "pause: if first: pygame.mixer.music.play() first = False ret, frame = capture.read() elapsed =", "inp == \"\\x1b\" or inp == \"q\": break if inp == ' ':", "import Terminal from PIL import Image, ImageOps import cv2 term = Terminal() HALF", "= time.time() - start expected_frame = int(elapsed * fps) if frame_count < expected_frame:", "break if inp == ' ': pause = not pause pygame.mixer.music.pause() if pause", "for some reason # noinspection PyUnresolvedReferences r, g, b = pixels[x, y *", "audio pygame.mixer.init() pygame.mixer.music.load(path.split(\".\")[0] + \".wav\") pause = False first = True # main", "y in range(im.size[1] // 2): for x in range(im.size[0]): # false positives, pycharm", "# noinspection PyUnresolvedReferences r2, g2, b2 = pixels[x, y * 2 + 1]", "frames: {} | \" \"FPS: {}\".format( elapsed, frame_count - dropped_frames, expected_frame, dropped_frames, (frame_count", "expected_frame, dropped_frames, (frame_count - dropped_frames) / elapsed ) ) sys.stdout.flush() capture.release() cv2.destroyAllWindows() pygame.mixer.music.stop()", "g2, b2 = pixels[x, y * 2 + 1] res += term.on_color_rgb(r, g,", "term.cbreak(), term.hidden_cursor(), term.fullscreen(): # get start time start = time.time() # variables frame_count", "frame_count < expected_frame: frame_count += 1 dropped_frames += 1 continue if not ret:", "capture.read() elapsed = time.time() - start expected_frame = int(elapsed * fps) if frame_count", "{} | \" \"Actual frame: {} | \" \"Theoretical frame: {} | \"", "True # main loop while capture.isOpened(): # for pause/exit inp = term.inkey(timeout=0.01) #", "int(elapsed * fps) if frame_count < expected_frame: frame_count += 1 dropped_frames += 1", "pygame.mixer.init() pygame.mixer.music.load(path.split(\".\")[0] + \".wav\") pause = False first = True # main loop", "y * 2 + 1] res += term.on_color_rgb(r, g, b) + term.color_rgb(r2, g2,", "\"Theoretical frame: {} | \" \"Dropped frames: {} | \" \"FPS: {}\".format( elapsed,", "inp == ' ': pause = 
not pause pygame.mixer.music.pause() if pause else pygame.mixer.music.unpause()", "= Terminal() HALF = '\\N{LOWER HALF BLOCK}' def image(im): im = ImageOps.fit(im, (term.width,", "\"q\": break if inp == ' ': pause = not pause pygame.mixer.music.pause() if", "' ': pause = not pause pygame.mixer.music.pause() if pause else pygame.mixer.music.unpause() print(term.home +", "elapsed, frame_count - dropped_frames, expected_frame, dropped_frames, (frame_count - dropped_frames) / elapsed ) )", "= '\\N{LOWER HALF BLOCK}' def image(im): im = ImageOps.fit(im, (term.width, term.height * 2))", ") if not pause: if first: pygame.mixer.music.play() first = False ret, frame =", "{} | \" \"Dropped frames: {} | \" \"FPS: {}\".format( elapsed, frame_count -", "\".wav\") pause = False first = True # main loop while capture.isOpened(): #", "term = Terminal() HALF = '\\N{LOWER HALF BLOCK}' def image(im): im = ImageOps.fit(im,", "first = True # main loop while capture.isOpened(): # for pause/exit inp =", "1] res += term.on_color_rgb(r, g, b) + term.color_rgb(r2, g2, b2) + HALF return", "Terminal from PIL import Image, ImageOps import cv2 term = Terminal() HALF =", "os import sys import time import moviepy.editor import pygame from blessed import Terminal", "# get fps fps = capture.get(cv2.CAP_PROP_FPS) # load audio from video v =", "res def video(path): with term.cbreak(), term.hidden_cursor(), term.fullscreen(): # get start time start =", "from blessed import Terminal from PIL import Image, ImageOps import cv2 term =", "= int(elapsed * fps) if frame_count < expected_frame: frame_count += 1 dropped_frames +=", "term.white_on_black + \"Elapsed time: {} | \" \"Actual frame: {} | \" \"Theoretical", "time start = time.time() # variables frame_count = 1 dropped_frames = 0 #", "import sys import time import moviepy.editor import pygame from blessed import Terminal from", "%s to unpause, or %s or %s to exit.' 
% ( term.italic(term.bold(\"Space\")) +", "start expected_frame = int(elapsed * fps) if frame_count < expected_frame: frame_count += 1", "capture.isOpened(): # for pause/exit inp = term.inkey(timeout=0.01) # esc if inp == \"\\x1b\"", "\"\\x1b\" or inp == \"q\": break if inp == ' ': pause =", "else pygame.mixer.music.unpause() print(term.home + term.move_y((term.height - 1) // 2)) print( term.black_on_white( term.center( 'Paused.", "+ \".wav\") # play audio pygame.mixer.init() pygame.mixer.music.load(path.split(\".\")[0] + \".wav\") pause = False first", "im = ImageOps.fit(im, (term.width, term.height * 2)) pixels = im.load() res = ''", "1) // 2)) print( term.black_on_white( term.center( 'Paused. Press %s to unpause, or %s", "term.on_color_rgb(r, g, b) + term.color_rgb(r2, g2, b2) + HALF return res def video(path):", "# esc if inp == \"\\x1b\" or inp == \"q\": break if inp", "cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) im = Image.fromarray(img) sys.stdout.write(term.home + image(im)) sys.stdout.write( term.white_on_black + \"Elapsed time:", "( term.italic(term.bold(\"Space\")) + term.normal, term.italic(term.bold(\"Escape\")) + term.normal, term.italic(term.bold(\"Q\")) + term.normal ) ) )", "g, b = pixels[x, y * 2] # noinspection PyUnresolvedReferences r2, g2, b2", "r2, g2, b2 = pixels[x, y * 2 + 1] res += term.on_color_rgb(r,", "this for some reason # noinspection PyUnresolvedReferences r, g, b = pixels[x, y", "break frame_count += 1 img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) im = Image.fromarray(img) sys.stdout.write(term.home +", "load audio from video v = moviepy.editor.VideoFileClip(path) audio = v.audio audio.write_audiofile(path.split(\".\")[0] + \".wav\")", "+= 1 img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) im = Image.fromarray(img) sys.stdout.write(term.home + image(im)) sys.stdout.write(", "import moviepy.editor import pygame from blessed import Terminal from PIL import Image, ImageOps", "# load audio from video v = moviepy.editor.VideoFileClip(path) audio = 
v.audio audio.write_audiofile(path.split(\".\")[0] +", "audio = v.audio audio.write_audiofile(path.split(\".\")[0] + \".wav\") # play audio pygame.mixer.init() pygame.mixer.music.load(path.split(\".\")[0] + \".wav\")", "in range(im.size[0]): # false positives, pycharm doesn't like this for some reason #", "unpause, or %s or %s to exit.' % ( term.italic(term.bold(\"Space\")) + term.normal, term.italic(term.bold(\"Escape\"))", "| \" \"FPS: {}\".format( elapsed, frame_count - dropped_frames, expected_frame, dropped_frames, (frame_count - dropped_frames)", "- dropped_frames, expected_frame, dropped_frames, (frame_count - dropped_frames) / elapsed ) ) sys.stdout.flush() capture.release()", "range(im.size[0]): # false positives, pycharm doesn't like this for some reason # noinspection", "term.move_y((term.height - 1) // 2)) print( term.black_on_white( term.center( 'Paused. Press %s to unpause,", "= pixels[x, y * 2 + 1] res += term.on_color_rgb(r, g, b) +", "% ( term.italic(term.bold(\"Space\")) + term.normal, term.italic(term.bold(\"Escape\")) + term.normal, term.italic(term.bold(\"Q\")) + term.normal ) )", "cv2 term = Terminal() HALF = '\\N{LOWER HALF BLOCK}' def image(im): im =", "+ term.normal ) ) ) ) if not pause: if first: pygame.mixer.music.play() first", "\"Dropped frames: {} | \" \"FPS: {}\".format( elapsed, frame_count - dropped_frames, expected_frame, dropped_frames,", "BLOCK}' def image(im): im = ImageOps.fit(im, (term.width, term.height * 2)) pixels = im.load()", "frame: {} | \" \"Dropped frames: {} | \" \"FPS: {}\".format( elapsed, frame_count", "= time.time() # variables frame_count = 1 dropped_frames = 0 # load video", "+= term.on_color_rgb(r, g, b) + term.color_rgb(r2, g2, b2) + HALF return res def", "= capture.get(cv2.CAP_PROP_FPS) # load audio from video v = moviepy.editor.VideoFileClip(path) audio = v.audio", "HALF = '\\N{LOWER HALF BLOCK}' def image(im): im = ImageOps.fit(im, (term.width, term.height *", "video(path): with term.cbreak(), 
term.hidden_cursor(), term.fullscreen(): # get start time start = time.time() #", "\"FPS: {}\".format( elapsed, frame_count - dropped_frames, expected_frame, dropped_frames, (frame_count - dropped_frames) / elapsed", "term.height * 2)) pixels = im.load() res = '' for y in range(im.size[1]", "elapsed = time.time() - start expected_frame = int(elapsed * fps) if frame_count <", "frame_count - dropped_frames, expected_frame, dropped_frames, (frame_count - dropped_frames) / elapsed ) ) sys.stdout.flush()", "video capture = cv2.VideoCapture(path) # get fps fps = capture.get(cv2.CAP_PROP_FPS) # load audio", "pause else pygame.mixer.music.unpause() print(term.home + term.move_y((term.height - 1) // 2)) print( term.black_on_white( term.center(", "term.hidden_cursor(), term.fullscreen(): # get start time start = time.time() # variables frame_count =", "PIL import Image, ImageOps import cv2 term = Terminal() HALF = '\\N{LOWER HALF", "+ image(im)) sys.stdout.write( term.white_on_black + \"Elapsed time: {} | \" \"Actual frame: {}", "to exit.' % ( term.italic(term.bold(\"Space\")) + term.normal, term.italic(term.bold(\"Escape\")) + term.normal, term.italic(term.bold(\"Q\")) + term.normal", "# false positives, pycharm doesn't like this for some reason # noinspection PyUnresolvedReferences", "<gh_stars>0 import os import sys import time import moviepy.editor import pygame from blessed", "print(term.home + term.move_y((term.height - 1) // 2)) print( term.black_on_white( term.center( 'Paused. 
Press %s", "| \" \"Theoretical frame: {} | \" \"Dropped frames: {} | \" \"FPS:", "def image(im): im = ImageOps.fit(im, (term.width, term.height * 2)) pixels = im.load() res", "PyUnresolvedReferences r2, g2, b2 = pixels[x, y * 2 + 1] res +=", "term.italic(term.bold(\"Space\")) + term.normal, term.italic(term.bold(\"Escape\")) + term.normal, term.italic(term.bold(\"Q\")) + term.normal ) ) ) )", "ret: break frame_count += 1 img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) im = Image.fromarray(img) sys.stdout.write(term.home", "dropped_frames, (frame_count - dropped_frames) / elapsed ) ) sys.stdout.flush() capture.release() cv2.destroyAllWindows() pygame.mixer.music.stop() video(sys.argv[1])", "time.time() # variables frame_count = 1 dropped_frames = 0 # load video capture", "time.time() - start expected_frame = int(elapsed * fps) if frame_count < expected_frame: frame_count", "2] # noinspection PyUnresolvedReferences r2, g2, b2 = pixels[x, y * 2 +", "False ret, frame = capture.read() elapsed = time.time() - start expected_frame = int(elapsed", "pygame.mixer.music.pause() if pause else pygame.mixer.music.unpause() print(term.home + term.move_y((term.height - 1) // 2)) print(", "dropped_frames += 1 continue if not ret: break frame_count += 1 img =", "// 2)) print( term.black_on_white( term.center( 'Paused. 
Press %s to unpause, or %s or", "= capture.read() elapsed = time.time() - start expected_frame = int(elapsed * fps) if", "if not ret: break frame_count += 1 img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) im =", "{} | \" \"Theoretical frame: {} | \" \"Dropped frames: {} | \"", "# noinspection PyUnresolvedReferences r, g, b = pixels[x, y * 2] # noinspection", "2 + 1] res += term.on_color_rgb(r, g, b) + term.color_rgb(r2, g2, b2) +", "inp == \"q\": break if inp == ' ': pause = not pause", "play audio pygame.mixer.init() pygame.mixer.music.load(path.split(\".\")[0] + \".wav\") pause = False first = True #", "if not pause: if first: pygame.mixer.music.play() first = False ret, frame = capture.read()", "not pause: if first: pygame.mixer.music.play() first = False ret, frame = capture.read() elapsed", "doesn't like this for some reason # noinspection PyUnresolvedReferences r, g, b =", "dropped_frames, expected_frame, dropped_frames, (frame_count - dropped_frames) / elapsed ) ) sys.stdout.flush() capture.release() cv2.destroyAllWindows()", "reason # noinspection PyUnresolvedReferences r, g, b = pixels[x, y * 2] #", "to unpause, or %s or %s to exit.' 
% ( term.italic(term.bold(\"Space\")) + term.normal,", "audio from video v = moviepy.editor.VideoFileClip(path) audio = v.audio audio.write_audiofile(path.split(\".\")[0] + \".wav\") #", "import Image, ImageOps import cv2 term = Terminal() HALF = '\\N{LOWER HALF BLOCK}'", "PyUnresolvedReferences r, g, b = pixels[x, y * 2] # noinspection PyUnresolvedReferences r2,", "= ImageOps.fit(im, (term.width, term.height * 2)) pixels = im.load() res = '' for", "'\\N{LOWER HALF BLOCK}' def image(im): im = ImageOps.fit(im, (term.width, term.height * 2)) pixels", "x in range(im.size[0]): # false positives, pycharm doesn't like this for some reason", "pause pygame.mixer.music.pause() if pause else pygame.mixer.music.unpause() print(term.home + term.move_y((term.height - 1) // 2))", "+= 1 continue if not ret: break frame_count += 1 img = cv2.cvtColor(frame,", "b) + term.color_rgb(r2, g2, b2) + HALF return res def video(path): with term.cbreak(),", "# play audio pygame.mixer.init() pygame.mixer.music.load(path.split(\".\")[0] + \".wav\") pause = False first = True", "* 2)) pixels = im.load() res = '' for y in range(im.size[1] //", "fps fps = capture.get(cv2.CAP_PROP_FPS) # load audio from video v = moviepy.editor.VideoFileClip(path) audio", "frame: {} | \" \"Theoretical frame: {} | \" \"Dropped frames: {} |", "main loop while capture.isOpened(): # for pause/exit inp = term.inkey(timeout=0.01) # esc if", "res += term.on_color_rgb(r, g, b) + term.color_rgb(r2, g2, b2) + HALF return res", "term.normal, term.italic(term.bold(\"Escape\")) + term.normal, term.italic(term.bold(\"Q\")) + term.normal ) ) ) ) if not", "fps) if frame_count < expected_frame: frame_count += 1 dropped_frames += 1 continue if", "'Paused. Press %s to unpause, or %s or %s to exit.' 
% (", "* fps) if frame_count < expected_frame: frame_count += 1 dropped_frames += 1 continue", "= 0 # load video capture = cv2.VideoCapture(path) # get fps fps =", "r, g, b = pixels[x, y * 2] # noinspection PyUnresolvedReferences r2, g2,", "im.load() res = '' for y in range(im.size[1] // 2): for x in", "\".wav\") # play audio pygame.mixer.init() pygame.mixer.music.load(path.split(\".\")[0] + \".wav\") pause = False first =", "= '' for y in range(im.size[1] // 2): for x in range(im.size[0]): #", "exit.' % ( term.italic(term.bold(\"Space\")) + term.normal, term.italic(term.bold(\"Escape\")) + term.normal, term.italic(term.bold(\"Q\")) + term.normal )", "= cv2.VideoCapture(path) # get fps fps = capture.get(cv2.CAP_PROP_FPS) # load audio from video" ]
[ "states_only_df[\"per_state_count\"] = list_state_count states_only_df[\"state_id\"] = list_str_states states_only_df[\"state_name\"] = fips_states_keys print(states_only_df) my_file = os.path.join(THIS_FOLDER,", "0 # for index, row in specific_date_df.iterrows(): # # print(index) # if row[\"state\"]", "covid copy_df = copy_df[copy_df['cases_per_log10_per100k'] != 0] # Per county geojson with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as", "# # print(len(counties_list)) # # print((counties_list)) # pop_counties[\"Geographic Area\"] = counties_list # #", "in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop == 1: # per100k.append(1) # else: #", "index_counter += 1 # # print(index_counter) # print(specific_date_Georgia_df) # has all data for", "def line_prepender(filename, line): with open(filename, 'r+') as f: content = f.read() f.seek(0, 0)", "fips_states_values = list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia') for w in fips_states_keys] THIS_FOLDER =", "zip(fips_states_keys, fips_states_values): # pop_counties[\"state\"] = pop_counties[\"state\"].replace(state, state_id) # specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') #", "= pd.DataFrame(data=None, columns=list(df.columns.values)) for index, row in df.iterrows(): if row[\"date\"] == input_date: specific_date_df.loc[df.index[index]]", "# states_col_for_county_pop.append(one_state) # # print(len(states_col_for_county_pop)) # # print(states_col_for_county_pop) # pop_counties[\"state\"] = states_col_for_county_pop #", "# print(item) log10_per100k.append(math.log10(item)) specific_date_df[\"cases_per_log10_per100k\"] = log10_per100k copy_df = specific_date_df.copy() # this is to", "state, in this case Georgia # state = \"Georgia\" # index_counter = 0", "= [] # for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop ==", "date): 
# \"for showing data per county\" # my_file = os.path.join(THIS_FOLDER, 'population_counties_2019.xlsx') #", "zip(fips_states_keys, fips_states_values): specific_date_df['state'] = specific_date_df['state'].replace(state, state_id) # print(specific_date_df) specific_date_df[\"state_name\"] = specific_date_df[\"state\"] for state,", "in spec_fips: boo = True for fips_census in fips_county_ids: if spec_fips == fips_census:", "# states_heat_map(specific_date_df): # fig.write_image(\"images_counties/\"+new_date+\"_county_per100k.png\") fig.write_image(\"C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/pages/static/current_counties.png\") html_header = \"\"\" {% extends 'base.html' %} {%", "3065 # print(population_counties_list) specific_date_df[\"county_population\"] = population_counties_list per100k = [] for pop, count in", "specific_date_df = make_df_for_date(input_date = current_date, df = df) fig = counties_heat_map(specific_date_df, new_date) #", "# one_state = '' # for state in fips_states_keys: # if state in", "spec_state = list(specific_date_df[\"state\"]) # pop_county = list(pop_counties[\"Geographic Area\"]) # pop_state = list(pop_counties[\"state\"]) #", "= 'C:/Users/karas/.spyder-py3/coronavirus/images_counties' # for filename in os.listdir(directory): # # print(\"hi\") # f =", "dfmain[\"date\"][i] if i%50 == 0 and new_date != old_date: old_date = new_date new_date", "# # per county # fig = px.choropleth(copy_df, geojson=counties, # locations='fips', # color='log10_per100k',", "states_mapping = json.load(response) print(states_mapping[\"features\"][0][\"properties\"][\"STATE\"]) print(len(states_mapping[\"features\"])) #3221 # per state fig = px.choropleth(states_only_df, geojson=states_mapping,", "missing from covid copy_df = copy_df[copy_df['cases_per_log10_per100k'] != 0] # Per county geojson with", "= i, yesterday=False) df, current_date = load_data(when = i, yesterday=True) # current_date =", "showing data per state\" states_only_df = 
pd.DataFrame() list_state_count = [] list_str_states = list(specific_date_df[\"state\"].unique())", "state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state'] = specific_date_df['state'].replace(state, state_id) # print(specific_date_df) specific_date_df[\"state_name\"] = specific_date_df[\"state\"]", "state in row[\"Geographic Area\"]: # if row[\"Geographic Area\"].find(state) > 1: # counties =", "specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Virgin Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Puerto Rico\"] specific_date_df", "# print(len(fips_county_ids)) specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia spec_fips = list(specific_date_df[\"fips\"]) odd_balls =", "fips_states_keys print(states_only_df) my_file = os.path.join(THIS_FOLDER, 'population_states_2019.txt') pop_states = pd.read_csv(my_file, header=0) # print(pop_states[\"State\"]) #", "'USA-states', featureidkey = \"properties.STATE\", hover_name = \"state_name\", scope=\"usa\", labels={'per100k':'cases per 100k'} ) fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0})", "# print(copy_new_df.index[index]) # specific_date_Georgia_df.loc[index_counter] = specific_date_df.iloc[index] # index_counter += 1 # # print(index_counter)", "# print(spec_fips) # print(len(population_counties_list)) # 3065 # print(population_counties_list) specific_date_df[\"county_population\"] = population_counties_list per100k =", "data from census that is missing from covid # copy_df = copy_df[copy_df['log10_per100k'] !=", "\"Guam\"] for state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state'] = specific_date_df['state'].replace(state, state_id) # print(specific_date_df)", "hover_name = \"county\", # scope=\"usa\", # ) # fig.update_layout(margin={\"r\":5,\"t\":5,\"l\":5,\"b\":5}, # title_text = '<br><br>Covid-19", ",').rstrip('aAbBcC')) # 
pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('.', '') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic", "# # f = f.save(filename) # images.append(f) # print(len(images)) # images[0].save('covid_timeline_county_cases.gif', # save_all=True,", "urllib.request import urlopen import json import pandas as pd import plotly.express as px", "# ) # fig.update_layout(margin={\"r\":5,\"t\":5,\"l\":5,\"b\":5}, # title_text = '<br><br>Covid-19 Spread Per 100k Population Per", "titlefont = {\"size\": 15, \"color\":\"White\"}, paper_bgcolor='#4E5D6C', plot_bgcolor='#4E5D6C', geo=dict(bgcolor= 'rgba(0,0,0,0)', lakecolor='#4E5D6C'), font = {\"size\":", "pop_counties[\"Geographic Area\"].str.replace('Parish', '') # DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '')", "== True: population_counties_list.append(1) odd_balls.append(spec_fips) # unknown county cases # print(spec_fips) # print(len(population_counties_list)) #", "list_str_states: total = 0 for index, row in specific_date_df.iterrows(): if row[\"state\"] == id_:", "state_id) # specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic", "# print((counties[\"features\"][0])) #3221 # # per county # fig = px.choropleth(copy_df, geojson=counties, #", "row[\"cases\"] > 0: IFR = row[\"deaths\"] / row[\"cases\"] IFR_list.append(IFR) else: IFR_list.append(0) specific_date_df[\"IFR\"] =", "for item in pop_counties[\"Geographic Area\"]: # # if \"Virginia\" in item: # #", "print(index_counter) # print(specific_date_Georgia_df) # has all data for current date with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as", "import json import pandas as pd import plotly.express as px import plotly from", "boo = False # # counter += 1 # if boo == True:", 
"return df, current_date def make_df_for_date(input_date, df): specific_date_df = pd.DataFrame(data=None, columns=list(df.columns.values)) for index, row", "picking out a specific state, in this case Georgia # state = \"Georgia\"", "Area\"] = pop_counties[\"Geographic Area\"].str.replace('Parish', '') # DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('", "# copy_df = copy_df[copy_df['log10_per100k'] != 0] # # Per county geojson # with", "one_state in row[\"Geographic Area\"]: # states_col_for_county_pop.append(one_state) # # print(len(states_col_for_county_pop)) # # print(states_col_for_county_pop) #", "specific_date_df #%% def states_heat_map(specific_date_df): \"for showing data per state\" states_only_df = pd.DataFrame() list_state_count", "= df[\"date\"][df.shape[0] - 1] # 6/29/2020 else: current_date = df[\"date\"][when] return df, current_date", "fips_states = json.load(response) fips_states_keys = list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia')", "29 15:54:28 2020 https://plotly.com/python/county-choropleth/?fbclid=IwAR1xOTSniBA_d1okZ-xEOa8eEeapK8AFTgWILshAnEvfLgJQPAhHgsVCIBE https://www.kaggle.com/fireballbyedimyrnmom/us-counties-covid-19-dataset better census data https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/ \"\"\" from urllib.request import", "# print(pop_states[\"State\"]) # print(states_only_df) pop_list = [] for state in states_only_df[\"state_name\"]: for i,row", "Columbia\": # # print(\"aye\") # # print(one_state) # one_state = \"District of Columbia\"", "print(len(state_id)) # print(len(fips_county_ids)) specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia spec_fips = list(specific_date_df[\"fips\"]) odd_balls", "pop_counties.iterrows(): # # if row[\"Geographic Area\"] == \"District of Columbia\": # # #", "'log(cases/100k)'} ) 
fig.update_layout(margin={\"r\":5,\"t\":20,\"l\":5,\"b\":5}, title_text = '<br><br>Covid-19 Total Cases Per 100k Population Per<br>County Using", "print(specific_date_Georgia_df) # has all data for current date with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states", "# if row[\"Geographic Area\"].find(state) > 1: # counties = row[\"Geographic Area\"].replace(state, '') #", "= per10k # # print(specific_date_df) # # print(per10k) # # import math #", "header=0) # print(pop_states[\"State\"]) # print(states_only_df) pop_list = [] for state in states_only_df[\"state_name\"]: for", "census that is missing from covid copy_df = copy_df[copy_df['cases_per_log10_per100k'] != 0] # Per", "# index_counter += 1 # # print(index_counter) # print(specific_date_Georgia_df) # has all data", "specific_date_df.iterrows(): if row[\"cases\"] > 0: IFR = row[\"deaths\"] / row[\"cases\"] IFR_list.append(IFR) else: IFR_list.append(0)", "# per100k.append(1) # else: # per100k.append(100000 * (count/pop)) # specific_date_df[\"per100k\"] = per100k #", "= os.path.join(THIS_FOLDER, 'all_census_data.csv') pop_counties = pd.read_csv(open(my_file)) # print(pop_counties) county_id = list(pop_counties[\"COUNTY\"]) state_id =", "county_id[n] = str(c_id) for n, s_id in enumerate(state_id): if s_id < 10: state_id[n]", "0, yesterday=True): df = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) current_date = '' if yesterday: current_date", "plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html') # plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/'+date+'_counties.html') # plot(fig) return fig #%% def main(): #[282519 rows x", "0 for index, row in specific_date_df.iterrows(): if row[\"state\"] == id_: # print(id_) total", "[\"county_population\", 
\"cases\", \"cases_per100k\", \"cases_per_log10_per100k\", \"deaths\", \"IFR\"], scope=\"usa\", labels = {'cases_per_log10_per100k': 'log(cases/100k)'} ) fig.update_layout(margin={\"r\":5,\"t\":20,\"l\":5,\"b\":5},", "specific_date_df[\"cases_per100k\"] = per100k # print(specific_date_df) # print(per100k) per10k = [] for pop, count", "# # print(item) # # print(pop_counties.shape) # states_col_for_county_pop = [] # for index,", "if row[\"state\"] in row[\"Geographic Area\"]: # # # print(\"oh yeah\") # # row[\"Geographic", "row[\"state\"] in row[\"Geographic Area\"]: # # # print(\"oh yeah\") # # row[\"Geographic Area\"].replace(row[\"state\"],", "# counties_list.append(counties) # # for index, row in pop_counties.iterrows(): # # if row[\"state\"]", "= specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia spec_fips = list(specific_date_df[\"fips\"]) odd_balls = [] # unknown", "# # print(population_counties_list) # specific_date_df[\"county_population\"] = population_counties_list # # print(specific_date_df) # per100k =", "fips_county_ids = [] for n, c_id in enumerate(county_id): if c_id < 10: county_id[n]", "print(fips_county_ids[1]) # print(len(county_id)) # print(len(state_id)) # print(len(fips_county_ids)) specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia", "pop_counties[\"Geographic Area\"].str.replace('.', '') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(',', '') # # pop_counties[\"Geographic", "= list(pop_counties[\"STATE\"]) population_per_county = list(pop_counties[\"POPESTIMATE2019\"]) fips_county_ids = [] for n, c_id in enumerate(county_id):", "pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) current_date = '' if yesterday: current_date = df[\"date\"][df.shape[0] - 1]", "locationmode = 'USA-states', featureidkey = \"id\", hover_name = \"county_and_state\", hover_data = 
[\"county_population\", \"cases\",", "Columbia\": # # print(\"huzzah\") # if state == \"District of Columbia\": # #", "row in specific_date_df.iterrows(): # # print(index) # if row[\"state\"] == state: # #", "in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop == 1: # per10k.append(1) # else: #", "# spec_county = list(specific_date_df[\"county\"]) # spec_state = list(specific_date_df[\"state\"]) # pop_county = list(pop_counties[\"Geographic Area\"])", "load_data(when = i, yesterday=False) df, current_date = load_data(when = i, yesterday=True) # current_date", "= pop_counties[\"Geographic Area\"].str.replace(' ', '') # # print(pop_counties) # # for value in", "zip(county_id, state_id): fips_county_ids.append(s + c) # print(fips_county_ids[1]) # print(len(county_id)) # print(len(state_id)) # print(len(fips_county_ids))", "old_date = '' for i in range(dfmain.shape[0]): new_date = dfmain[\"date\"][i] if i%50 ==", "spec_state): # boo = True # for p_county, p_state in zip(pop_county, pop_state): #", "print(pop_counties) # # for value in pop_counties[\"Geographic Area\"]: # # if \"District \"", "# if \"District \" in value: # # print(value) # # for item", "= df) fig = counties_heat_map(specific_date_df, new_date) # states_heat_map(specific_date_df): # fig.write_image(\"images_counties/\"+new_date+\"_county_per100k.png\") fig.write_image(\"C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/pages/static/current_counties.png\") html_header =", "# # print(\"hi\") # f = Image.open('C:/Users/karas/.spyder-py3/coronavirus/images_counties/'+filename) # # f = f.save(filename) #", "pop_counties.iterrows(): # # if row[\"state\"] in row[\"Geographic Area\"]: # # # print(\"oh yeah\")", "json.load(response) print(states_mapping[\"features\"][0][\"properties\"][\"STATE\"]) print(len(states_mapping[\"features\"])) #3221 # per state fig = px.choropleth(states_only_df, geojson=states_mapping, locations='state_id', color='per100k',", "for pop, 
count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1: per100k.append(1) else: per100k.append(100000", "pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('Parish', '') # DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic", "color_continuous_scale=\"thermal\", color_continuous_scale=[[0.0,'rgb(0,0,200)'], [0.3, 'rgb(149,207,216)'], [0.5, 'rgb(234,252,258)'], [0.6, 'rgb(255,210,0)'], [1.0, 'rgb(200,0,0)']], range_color=(0, 5), #", "= [] for n, c_id in enumerate(county_id): if c_id < 10: county_id[n] =", "list(pop_counties[\"state\"]) # population_per_county = list(pop_counties[2019]) # population_counties_list = [] # # counter =", "locations='fips', # color='log10_per100k', # # color_continuous_scale=\"Reds\", # color_continuous_scale=\"Viridis\", # range_color=(0, 5), # #", "pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].map(lambda x: x.lstrip('. ,').rstrip('aAbBcC')) # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic", "# def counties_heat_map(specific_date_df, date): # \"for showing data per county\" # my_file =", "current_date = '' if yesterday: current_date = df[\"date\"][df.shape[0] - 1] # 6/29/2020 else:", "item in per100k: # # print(item) # log10_per100k.append(math.log10(item)) # specific_date_df[\"log10_per100k\"] = log10_per100k #", "Per 100k Population Per<br>County Using 2019 Census Estimations<br>'+date, titlefont = {\"size\": 15, \"color\":\"White\"},", "if row[\"Geographic Area\"] == \"District of Columbia\": # # # print(\"sure\") #yes #", "from plotly.offline import plot import os import math if not os.path.exists(\"images_counties\"): os.mkdir(\"images_counties\") with", "urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response) # print(fips_states) fips_states_keys = list(fips_states.keys()) 
fips_states_values =", "= [] for item in per100k: # print(item) log10_per100k.append(math.log10(item)) specific_date_df[\"cases_per_log10_per100k\"] = log10_per100k copy_df", "fig.show() plot(fig) return fig #%% def counties_heat_map(specific_date_df, date): \"for showing data per county\"", "columns=list(specific_date_df.columns.values)) # print(specific_date_df) # print(specific_date_Georgia_df) # # for picking out a specific state,", "row[\"Geographic Area\"].replace(row[\"state\"], '') # # break # # print(len(counties_list)) # # print((counties_list)) #", "[] for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1: per10k.append(1) else:", "states_only_df[\"state_name\"]: for i,row in pop_states.iterrows(): if row[\"State\"] == state: pop_list.append(row[\"Population\"]) states_only_df[\"state_pop\"] = pop_list", "my_file = os.path.join(THIS_FOLDER, 'all_census_data.csv') pop_counties = pd.read_csv(open(my_file)) # print(pop_counties) county_id = list(pop_counties[\"COUNTY\"]) state_id", "(count/pop)) # specific_date_df[\"per100k\"] = per100k # # print(specific_date_df) # # print(per100k) # per10k", "fig #%% def main(): #[282519 rows x 6 columns] dfmain.shape[0] old_date = ''", "json.load(response) # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # print((counties[\"features\"][0])) #3221 # per county fig = px.choropleth(copy_df, geojson=counties,", "of Columbia\": # # # print(\"sure\") #yes # # print(specific_date_df) # for state,", "print(\"Date: \", new_date) # df, current_date = load_data(when = i, yesterday=False) df, current_date", "== \"District of Columbia\": # # # print(\"sure\") #yes # # print(specific_date_df) #", "3065 # # print(population_counties_list) # specific_date_df[\"county_population\"] = population_counties_list # # print(specific_date_df) # per100k", "kaggle but not in the geojson specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Northern Mariana Islands\"]", 
"#yes # # print(specific_date_df) # for state, state_id in zip(fips_states_keys, fips_states_values): # pop_counties[\"state\"]", "pop_counties.iterrows(): # one_state = '' # for state in fips_states_keys: # if state", "pop == 1: per100k.append(1) else: per100k.append(100000 * (count/pop)) specific_date_df[\"cases_per100k\"] = per100k # print(specific_date_df)", "= [] # for index, row in pop_counties.iterrows(): # one_state = '' #", "= list(pop_counties[\"state\"]) # population_per_county = list(pop_counties[2019]) # population_counties_list = [] # # counter", "per100k = [] for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1:", "line_prepender('C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html', html_header) break #%% if __name__ == \"__main__\": main() # #%% # from", "# # print(pop_counties.shape) # states_col_for_county_pop = [] # for index, row in pop_counties.iterrows():", "IFR_list.append(IFR) else: IFR_list.append(0) specific_date_df[\"IFR\"] = IFR_list # print(specific_date_df) specific_date_df = specific_date_df.reset_index(drop=True) # specific_date_Georgia_df", "# images.append(f) # print(len(images)) # images[0].save('covid_timeline_county_cases.gif', # save_all=True, append_images=images[1:], optimize=False, duration=500, loop=0) #%%", "os.path.exists(\"images_counties\"): os.mkdir(\"images_counties\") with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response) fips_states_keys = list(fips_states.keys()) fips_states_values", "per state\" states_only_df = pd.DataFrame() list_state_count = [] list_str_states = list(specific_date_df[\"state\"].unique()) # print(list_str_states)", "pop_list # print(pop_list) # print(len(pop_list)) per100k = [] for pop, count in zip(states_only_df[\"state_pop\"],", "dfmain.shape[0] old_date = '' for i in 
range(dfmain.shape[0]): new_date = dfmain[\"date\"][i] if i%50", "Per<br>County Using 2019 Census Estimations<br>'+date, titlefont = {\"size\": 15, \"color\":\"White\"}, paper_bgcolor='#4E5D6C', plot_bgcolor='#4E5D6C', geo=dict(bgcolor=", "per10k: # print(item) log10_per10k.append(math.log10(item)) specific_date_df[\"log10_per10k\"] = log10_per10k # import math log10_per100k = []", "os.path.join(THIS_FOLDER, 'all_census_data.csv') pop_counties = pd.read_csv(open(my_file)) # print(pop_counties) county_id = list(pop_counties[\"COUNTY\"]) state_id = list(pop_counties[\"STATE\"])", "'r+') as f: content = f.read() f.seek(0, 0) f.write(line.rstrip('\\r\\n') + '\\n' + content)", "pd.read_csv(my_file, header=0) # print(pop_states[\"State\"]) # print(states_only_df) pop_list = [] for state in states_only_df[\"state_name\"]:", "state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state_name'] = specific_date_df['state_name'].replace(state_id, state) county_and_state = [] for index,", "pop_counties[\"Geographic Area\"].str.replace(' County', '') # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '')", "Columbia', 'District of Columbia') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' County', '') #", "= 800, height = 650 ) # fig.show() # plot(fig,filename='covid_counties_'+date+'.html') plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html') # plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/'+date+'_counties.html')", "boo == True: population_counties_list.append(1) odd_balls.append(spec_fips) # unknown county cases # print(spec_fips) # print(len(population_counties_list))", "title_text = '<br><br>Covid-19 Spread Per 100k Population Per County<br>Using 2019 Census Estimations<br>'+date #", "import math log10_per100k = [] for item in per100k: # print(item) 
log10_per100k.append(math.log10(item)) specific_date_df[\"cases_per_log10_per100k\"]", "\"IFR\"], scope=\"usa\", labels = {'cases_per_log10_per100k': 'log(cases/100k)'} ) fig.update_layout(margin={\"r\":5,\"t\":20,\"l\":5,\"b\":5}, title_text = '<br><br>Covid-19 Total Cases", "= \"00\"+str(c_id) elif c_id < 100: county_id[n] = \"0\"+str(c_id) else: county_id[n] = str(c_id)", "copy_df = specific_date_df.copy() # this is to remove data from census that is", "# # color_continuous_scale=\"Reds\", # color_continuous_scale=\"Viridis\", # range_color=(0, 5), # # locationmode = 'USA-states',", "per 100k'} ) fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0}) fig.show() plot(fig) return fig #%% def counties_heat_map(specific_date_df, date): \"for", "# fig = px.choropleth(copy_df, geojson=counties, # locations='fips', # color='log10_per100k', # # color_continuous_scale=\"Reds\", #", "px.choropleth(states_only_df, geojson=states_mapping, locations='state_id', color='per100k', color_continuous_scale=\"Viridis\", # range_color=(0, 10), # locationmode = 'USA-states', featureidkey", "# print(fips_states) fips_states_keys = list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia') for", "specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Guam\"] for state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state'] =", "per100k.append(1) else: per100k.append(100000 * (count/pop)) specific_date_df[\"cases_per100k\"] = per100k # print(specific_date_df) # print(per100k) per10k", "counties = row[\"Geographic Area\"].replace(state, '') # if state == \"District of Columbia\": #", "= os.path.join(THIS_FOLDER, 'population_counties_2019.xlsx') # pop_counties = pd.read_excel(open(my_file, 'rb'), index_col=None, sep='\\t') # # print(pop_counties)", "range_color=(0, 10), # locationmode = 'USA-states', featureidkey = \"properties.STATE\", hover_name = \"state_name\", scope=\"usa\",", 
"color_continuous_scale=\"Reds\", # color_continuous_scale=\"Viridis\", # range_color=(0, 5), # # locationmode = 'USA-states', # featureidkey", "Using 2019 Census Estimations<br>'+date, titlefont = {\"size\": 15, \"color\":\"White\"}, paper_bgcolor='#4E5D6C', plot_bgcolor='#4E5D6C', geo=dict(bgcolor= 'rgba(0,0,0,0)',", "locationmode = 'USA-states', featureidkey = \"properties.STATE\", hover_name = \"state_name\", scope=\"usa\", labels={'per100k':'cases per 100k'}", "'') # # print(pop_counties) # # for value in pop_counties[\"Geographic Area\"]: # #", "row in pop_counties.iterrows(): # # if row[\"Geographic Area\"] == \"District of Columbia\": #", "# one_state = state # # if one_state == \"Distric of Columbia\": #", "# for p_county, p_state in zip(pop_county, pop_state): # if s_county == p_county and", "DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '') # DistrictOfColumbia # spec_county", "row[\"State\"] == state: pop_list.append(row[\"Population\"]) states_only_df[\"state_pop\"] = pop_list # print(pop_list) # print(len(pop_list)) per100k =", "[] for item in per100k: # print(item) log10_per100k.append(math.log10(item)) specific_date_df[\"cases_per_log10_per100k\"] = log10_per100k copy_df =", "= [] for item in per10k: # print(item) log10_per10k.append(math.log10(item)) specific_date_df[\"log10_per10k\"] = log10_per10k #", "c_id in enumerate(county_id): if c_id < 10: county_id[n] = \"00\"+str(c_id) elif c_id <", "plot(fig) return fig #%% def main(): #[282519 rows x 6 columns] dfmain.shape[0] old_date", "c_id < 100: county_id[n] = \"0\"+str(c_id) else: county_id[n] = str(c_id) for n, s_id", "# import math log10_per10k = [] for item in per10k: # print(item) log10_per10k.append(math.log10(item))", "# per county # fig = px.choropleth(copy_df, geojson=counties, # locations='fips', # color='log10_per100k', #", "= '' for i in range(dfmain.shape[0]): new_date = dfmain[\"date\"][i] if i%50 == 0", "if one_state == 
\"Distric of Columbia\": # # print(\"huzzah\") # if state ==", "#3221 # # per county # fig = px.choropleth(copy_df, geojson=counties, # locations='fips', #", "for picking out a specific state, in this case Georgia # state =", "fig.write_image(\"images_counties/\"+new_date+\"_county_per100k.png\") fig.write_image(\"C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/pages/static/current_counties.png\") html_header = \"\"\" {% extends 'base.html' %} {% block content %}", "per10k = [] for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1:", "if spec_fips == fips_census: population_counties_list.append(population_per_county[fips_county_ids.index(fips_census)]) boo = False # counter += 1 if", "in pop_states.iterrows(): if row[\"State\"] == state: pop_list.append(row[\"Population\"]) states_only_df[\"state_pop\"] = pop_list # print(pop_list) #", "# break print(list_state_count) print(len(list_state_count)) states_only_df[\"per_state_count\"] = list_state_count states_only_df[\"state_id\"] = list_str_states states_only_df[\"state_name\"] = fips_states_keys", "Population Per<br>County Using 2019 Census Estimations<br>'+date, titlefont = {\"size\": 15, \"color\":\"White\"}, paper_bgcolor='#4E5D6C', plot_bgcolor='#4E5D6C',", "of Columbia', 'District of Columbia') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' County', '')", "population_counties_list = [] # # counter = 0 # for s_county, s_state in", "has all data for current date # 3067 x 6 # specific_date_df =", "new_date new_date = dfmain[\"date\"][dfmain.shape[0] - 1] # if yesterday = True # new_date", "Columbia\": # # # print(\"sure\") #yes # # print(specific_date_df) # for state, state_id", "# specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('Parish',", "json.load(response) 
fips_states_keys = list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia') for w", "#3221 # per state fig = px.choropleth(states_only_df, geojson=states_mapping, locations='state_id', color='per100k', color_continuous_scale=\"Viridis\", # range_color=(0,", "with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: # counties = json.load(response) # # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # #", "# print(specific_date_df) # for state, state_id in zip(fips_states_keys, fips_states_values): # pop_counties[\"state\"] = pop_counties[\"state\"].replace(state,", "specific_date_df[\"county_and_state\"] = county_and_state return specific_date_df #%% def states_heat_map(specific_date_df): \"for showing data per state\"", "'') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(',', '') # # pop_counties[\"Geographic Area\"] =", "columns] dfmain.shape[0] old_date = '' for i in range(dfmain.shape[0]): new_date = dfmain[\"date\"][i] if", "math # log10_per100k = [] # for item in per100k: # # print(item)", "= \"0\"+str(s_id) else: state_id[n] = str(s_id) # print(county_id[57]) # print(state_id[600]) for c,s in", "print(len(states_mapping[\"features\"])) #3221 # per state fig = px.choropleth(states_only_df, geojson=states_mapping, locations='state_id', color='per100k', color_continuous_scale=\"Viridis\", #", "specific_date_df.reset_index(drop=True) # specific_date_Georgia_df = pd.DataFrame(data=None, columns=list(specific_date_df.columns.values)) # print(specific_date_df) # print(specific_date_Georgia_df) # # for", "pop_states = pd.read_csv(my_file, header=0) # print(pop_states[\"State\"]) # print(states_only_df) pop_list = [] for state", "= copy_df[copy_df['log10_per100k'] != 0] # # Per county geojson # with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as", "Per 
100k Population Per County<br>Using 2019 Census Estimations<br>'+date # ) # # fig.show()", "with open(filename, 'r+') as f: content = f.read() f.seek(0, 0) f.write(line.rstrip('\\r\\n') + '\\n'", "per10k # print(specific_date_df) # print(per10k) # import math log10_per10k = [] for item", "in pop_counties.iterrows(): # one_state = '' # for state in fips_states_keys: # if", "print(item) # log10_per10k.append(math.log10(item)) # specific_date_df[\"log10_per10k\"] = log10_per10k # # import math # log10_per100k", "%} <body style=\"background-color:black;color:white;\"> \"\"\" line_prepender('C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html', html_header) break #%% if __name__ == \"__main__\": main()", "# print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # print((counties[\"features\"][0])) #3221 # per county fig = px.choropleth(copy_df, geojson=counties, locations='fips',", "== 1: per100k.append(1) else: per100k.append(100000 * (count/pop)) specific_date_df[\"cases_per100k\"] = per100k # print(specific_date_df) #", "copy_df = copy_df[copy_df['log10_per100k'] != 0] # # Per county geojson # with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json')", "labels={'per100k':'cases per 100k'} ) fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0}) fig.show() plot(fig) return fig #%% def counties_heat_map(specific_date_df, date):", "# print(specific_date_Georgia_df) # has all data for current date with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response:", "= 0 for spec_fips in spec_fips: boo = True for fips_census in fips_county_ids:", "# # print(pop_counties) # counties_list = [] # for index, row in pop_counties.iterrows():", "boo == True: # population_counties_list.append(1) # # print(len(population_counties_list)) # 3065 # # print(population_counties_list)", "index, row in df.iterrows(): if 
row[\"date\"] == input_date: specific_date_df.loc[df.index[index]] = df.iloc[index] # print(specific_date_df)", "from census that is missing from covid copy_df = copy_df[copy_df['cases_per_log10_per100k'] != 0] #", "specific_date_df[specific_date_df[\"state\"] != \"Guam\"] for state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state'] = specific_date_df['state'].replace(state, state_id)", "spec_county = list(specific_date_df[\"county\"]) # spec_state = list(specific_date_df[\"state\"]) # pop_county = list(pop_counties[\"Geographic Area\"]) #", "print(states_col_for_county_pop) # pop_counties[\"state\"] = states_col_for_county_pop # # print(pop_counties) # counties_list = [] #", "states_col_for_county_pop # # print(pop_counties) # counties_list = [] # for index, row in", "= pop_counties[\"Geographic Area\"].str.replace(',', '') # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('District of Columbia", "= '2020-06-30' print(\"Date: \", new_date) # df, current_date = load_data(when = i, yesterday=False)", "item in per10k: # # print(item) # log10_per10k.append(math.log10(item)) # specific_date_df[\"log10_per10k\"] = log10_per10k #", "content) f.write('\\n{% endblock %}') #%% def load_data(when = 0, yesterday=True): df = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv',", "pop, count in zip(states_only_df[\"state_pop\"], states_only_df[\"per_state_count\"]): per100k.append(100000 * (count/pop)) states_only_df[\"per100k\"] = per100k print(states_only_df) with", "# print(pop_counties) # counties_list = [] # for index, row in pop_counties.iterrows(): #", "row[\"Geographic Area\"] == \"District of Columbia\": # # # print(\"sure\") #yes # #", "pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '') # # print(pop_counties) # # for", "', '') # DistrictOfColumbia # spec_county = list(specific_date_df[\"county\"]) # spec_state = 
list(specific_date_df[\"state\"]) #", "print(specific_date_df) # has all data for current date # 3067 x 6 #", "# specific_date_Georgia_df = pd.DataFrame(data=None, columns=list(specific_date_df.columns.values)) # print(specific_date_df) # print(specific_date_Georgia_df) # # for picking", "= row[\"county\"] +\", \"+ row[\"state_name\"] county_and_state.append(c_and_s) specific_date_df[\"county_and_state\"] = county_and_state return specific_date_df #%% def", "from urllib.request import urlopen import json import pandas as pd import plotly.express as", "# current_date = new_date specific_date_df = make_df_for_date(input_date = current_date, df = df) fig", "DistrictOfColumbia spec_fips = list(specific_date_df[\"fips\"]) odd_balls = [] # unknown county cases population_counties_list =", "- 1] # 6/29/2020, or yesterday # current_date = df[\"date\"][10] # 6/29/2020 #%%", "# print(index) # if row[\"state\"] == state: # # print(\"yes\") # # print(index)", "Mon Jun 29 15:54:28 2020 https://plotly.com/python/county-choropleth/?fbclid=IwAR1xOTSniBA_d1okZ-xEOa8eEeapK8AFTgWILshAnEvfLgJQPAhHgsVCIBE https://www.kaggle.com/fireballbyedimyrnmom/us-counties-covid-19-dataset better census data https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/ \"\"\" from", "[w.replace('ogia', 'orgia') for w in fips_states_keys] # these are in the data from", "== p_county and s_state == p_state: # population_counties_list.append(population_per_county[pop_county.index(p_county)]) # boo = False #", "== \"__main__\": main() # #%% # from PIL import Image, ImageDraw # import", "# print(specific_date_df) # print(specific_date_Georgia_df) # # for picking out a specific state, in", "== fips_census: population_counties_list.append(population_per_county[fips_county_ids.index(fips_census)]) boo = False # counter += 1 if boo ==", "= [] for index, row in specific_date_df.iterrows(): c_and_s = row[\"county\"] +\", \"+ row[\"state_name\"]", "print(id_) total += row[\"cases\"] 
list_state_count.append(total) # break print(list_state_count) print(len(list_state_count)) states_only_df[\"per_state_count\"] = list_state_count states_only_df[\"state_id\"]", "= True for fips_census in fips_county_ids: if spec_fips == fips_census: population_counties_list.append(population_per_county[fips_county_ids.index(fips_census)]) boo =", "-*- \"\"\" Created on Mon Jun 29 15:54:28 2020 https://plotly.com/python/county-choropleth/?fbclid=IwAR1xOTSniBA_d1okZ-xEOa8eEeapK8AFTgWILshAnEvfLgJQPAhHgsVCIBE https://www.kaggle.com/fireballbyedimyrnmom/us-counties-covid-19-dataset better census", "100k'} ) fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0}) fig.show() plot(fig) return fig #%% def counties_heat_map(specific_date_df, date): \"for showing", "return fig #%% def counties_heat_map(specific_date_df, date): \"for showing data per county\" my_file =", "in pop_counties.iterrows(): # # if row[\"Geographic Area\"] == \"District of Columbia\": # #", "pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('.', '') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(',', '')", "# new_date = '2020-06-30' print(\"Date: \", new_date) # df, current_date = load_data(when =", "fips_census in fips_county_ids: if spec_fips == fips_census: population_counties_list.append(population_per_county[fips_county_ids.index(fips_census)]) boo = False # counter", "per10k # # print(specific_date_df) # # print(per10k) # # import math # log10_per10k", "'all_census_data.csv') pop_counties = pd.read_csv(open(my_file)) # print(pop_counties) county_id = list(pop_counties[\"COUNTY\"]) state_id = list(pop_counties[\"STATE\"]) population_per_county", "print(pop_counties) # # print(pop_counties[\"Geographic Area\"]) # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].map(lambda x:", "== id_: # print(id_) total += row[\"cases\"] list_state_count.append(total) # break print(list_state_count) 
print(len(list_state_count)) states_only_df[\"per_state_count\"]", "in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1: per10k.append(1) else: per10k.append(10000 * (count/pop)) specific_date_df[\"per10k\"]", "= json.load(response) # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # print((counties[\"features\"][0])) #3221 # per county fig = px.choropleth(copy_df,", "# these are in the data from kaggle but not in the geojson", "break # # print(len(counties_list)) # # print((counties_list)) # pop_counties[\"Geographic Area\"] = counties_list #", "(count/pop)) states_only_df[\"per100k\"] = per100k print(states_only_df) with open('gz_2010_us_040_00_20m.json') as response: states_mapping = json.load(response) print(states_mapping[\"features\"][0][\"properties\"][\"STATE\"])", "DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('Parish', '') # DistrictOfColumbia # pop_counties[\"Geographic Area\"]", "= '<br><br>Covid-19 Total Cases Per 100k Population Per<br>County Using 2019 Census Estimations<br>'+date, titlefont", "data from census that is missing from covid copy_df = copy_df[copy_df['cases_per_log10_per100k'] != 0]", "state in fips_states_keys: # if state in row[\"Geographic Area\"]: # if row[\"Geographic Area\"].find(state)", "= df.iloc[index] # print(specific_date_df) # has all data for current date # 3067", "[w.replace('ogia', 'orgia') for w in fips_states_keys] THIS_FOLDER = os.path.dirname(os.path.abspath(__file__)) dfmain = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\":", ") # fig.update_layout(margin={\"r\":5,\"t\":5,\"l\":5,\"b\":5}, # title_text = '<br><br>Covid-19 Spread Per 100k Population Per County<br>Using", "utf-8 -*- \"\"\" Created on Mon Jun 29 15:54:28 2020 https://plotly.com/python/county-choropleth/?fbclid=IwAR1xOTSniBA_d1okZ-xEOa8eEeapK8AFTgWILshAnEvfLgJQPAhHgsVCIBE 
https://www.kaggle.com/fireballbyedimyrnmom/us-counties-covid-19-dataset better", "pd.DataFrame(data=None, columns=list(df.columns.values)) for index, row in df.iterrows(): if row[\"date\"] == input_date: specific_date_df.loc[df.index[index]] =", "odd_balls = [] # unknown county cases population_counties_list = [] # counter =", "\"District of Columbia\": # # # print(\"sure\") #yes # # print(specific_date_df) # for", "for filename in os.listdir(directory): # # print(\"hi\") # f = Image.open('C:/Users/karas/.spyder-py3/coronavirus/images_counties/'+filename) # #", "= pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) # print(dfmain.head) # print(dfmain.shape) # print(dfmain[\"date\"][dfmain.shape[0] - 1]) #", "fips_states_keys] THIS_FOLDER = os.path.dirname(os.path.abspath(__file__)) dfmain = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) # print(dfmain.head) # print(dfmain.shape)", "# 6/29/2020 else: current_date = df[\"date\"][when] return df, current_date def make_df_for_date(input_date, df): specific_date_df", "# for value in pop_counties[\"Geographic Area\"]: # # if \"District \" in value:", "\"Virgin Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Puerto Rico\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Guam\"]", "for state in fips_states_keys: # if state in row[\"Geographic Area\"]: # if row[\"Geographic", "pop_counties[\"Geographic Area\"].str.replace(',', '') # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('District of Columbia District", "one_state = '' # for state in fips_states_keys: # if state in row[\"Geographic", "'rb'), index_col=None, sep='\\t') # # print(pop_counties) # # print(pop_counties[\"Geographic Area\"]) # # pop_counties[\"Geographic", "current date with 
urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response) # print(fips_states) fips_states_keys =", "= specific_date_df.reset_index(drop=True) # specific_date_Georgia_df = pd.DataFrame(data=None, columns=list(specific_date_df.columns.values)) # print(specific_date_df) # print(specific_date_Georgia_df) # #", "specific_date_df[\"state_name\"] = specific_date_df[\"state\"] for state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state_name'] = specific_date_df['state_name'].replace(state_id, state)", "in zip(fips_states_keys, fips_states_values): specific_date_df['state_name'] = specific_date_df['state_name'].replace(state_id, state) county_and_state = [] for index, row", "= [] for pop, count in zip(states_only_df[\"state_pop\"], states_only_df[\"per_state_count\"]): per100k.append(100000 * (count/pop)) states_only_df[\"per100k\"] =", "winner # color_continuous_scale=\"Viridis\", # color_continuous_scale=\"hot\", # color_continuous_scale=\"ice\", # color_continuous_scale=\"thermal\", color_continuous_scale=[[0.0,'rgb(0,0,200)'], [0.3, 'rgb(149,207,216)'], [0.5,", "in pop_counties[\"Geographic Area\"]: # # if \"Virginia\" in item: # # print(item) #", "if state in row[\"Geographic Area\"]: # if row[\"Geographic Area\"].find(state) > 1: # one_state", "this is to remove data from census that is missing from covid copy_df", "# f = Image.open('C:/Users/karas/.spyder-py3/coronavirus/images_counties/'+filename) # # f = f.save(filename) # images.append(f) # print(len(images))", "= [] # for index, row in pop_counties.iterrows(): # for state in fips_states_keys:", "for current date # 3067 x 6 # specific_date_df = specific_date_df.copy() IFR_list =", "list(specific_date_df[\"county\"]) # spec_state = list(specific_date_df[\"state\"]) # pop_county = list(pop_counties[\"Geographic Area\"]) # pop_state =", "if state == \"District of Columbia\": # 
# print(\"trouble maker\") # # print(counties)", "'District of Columbia') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' County', '') # #", "x.lstrip('. ,').rstrip('aAbBcC')) # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('.', '') # pop_counties[\"Geographic Area\"] =", "= 0 for index, row in specific_date_df.iterrows(): if row[\"state\"] == id_: # print(id_)", "count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop == 1: # per10k.append(1) # else:", "list(pop_counties[\"POPESTIMATE2019\"]) fips_county_ids = [] for n, c_id in enumerate(county_id): if c_id < 10:", "print(states_only_df) with open('gz_2010_us_040_00_20m.json') as response: states_mapping = json.load(response) print(states_mapping[\"features\"][0][\"properties\"][\"STATE\"]) print(len(states_mapping[\"features\"])) #3221 # per", "from PIL import Image, ImageDraw # import PIL # import os # images", "= population_counties_list per100k = [] for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop", "False, width = 800, height = 650 ) # fig.show() # plot(fig,filename='covid_counties_'+date+'.html') plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html')", "[] # for item in per10k: # # print(item) # log10_per10k.append(math.log10(item)) # specific_date_df[\"log10_per10k\"]", "print(len(population_counties_list)) # 3065 # print(population_counties_list) specific_date_df[\"county_population\"] = population_counties_list per100k = [] for pop,", "#[282519 rows x 6 columns] dfmain.shape[0] old_date = '' for i in range(dfmain.shape[0]):", "state_id) # print(specific_date_df) specific_date_df[\"state_name\"] = specific_date_df[\"state\"] for state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state_name']", "list(fips_states.values()) fips_states_keys = [w.replace('ogia', 
'orgia') for w in fips_states_keys] # these are in", "county # fig = px.choropleth(copy_df, geojson=counties, # locations='fips', # color='log10_per100k', # # color_continuous_scale=\"Reds\",", "yesterday=False) df, current_date = load_data(when = i, yesterday=True) # current_date = new_date specific_date_df", "print(specific_date_df) # print(specific_date_Georgia_df) # # for picking out a specific state, in this", "of Columbia\": # # print(\"huzzah\") # if state == \"District of Columbia\": #", "per100k.append(1) # else: # per100k.append(100000 * (count/pop)) # specific_date_df[\"per100k\"] = per100k # #", "# print(len(population_counties_list)) # 3065 # # print(population_counties_list) # specific_date_df[\"county_population\"] = population_counties_list # #", "= list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia') for w in fips_states_keys] THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))", "fig #%% def counties_heat_map(specific_date_df, date): \"for showing data per county\" my_file = os.path.join(THIS_FOLDER,", "print(pop_list) # print(len(pop_list)) per100k = [] for pop, count in zip(states_only_df[\"state_pop\"], states_only_df[\"per_state_count\"]): per100k.append(100000", "counties_list # # print(pop_counties) # # for index, row in pop_counties.iterrows(): # #", "# print(specific_date_df) # print(per100k) per10k = [] for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]):", "dfmain[\"date\"][dfmain.shape[0] - 1] # if yesterday = True # new_date = '2020-06-30' print(\"Date:", "s_state in zip(spec_county, spec_state): # boo = True # for p_county, p_state in", "out a specific state, in this case Georgia # state = \"Georgia\" #", "current_date = dfmain[\"date\"][dfmain.shape[0] - 1] # 6/29/2020, or yesterday # current_date = df[\"date\"][10]", "list_state_count = [] list_str_states = list(specific_date_df[\"state\"].unique()) # print(list_str_states) for id_ in list_str_states: total", 
"population_counties_list # # print(specific_date_df) # per100k = [] # for pop, count in", "[0.6, 'rgb(255,210,0)'], [1.0, 'rgb(200,0,0)']], range_color=(0, 5), # locationmode = 'USA-states', featureidkey = \"id\",", "count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1: per100k.append(1) else: per100k.append(100000 * (count/pop))", "save_all=True, append_images=images[1:], optimize=False, duration=500, loop=0) #%% #Graveyard # def counties_heat_map(specific_date_df, date): # \"for", "[] for index, row in specific_date_df.iterrows(): c_and_s = row[\"county\"] +\", \"+ row[\"state_name\"] county_and_state.append(c_and_s)", "== \"Distric of Columbia\": # # print(\"huzzah\") # if state == \"District of", "%}') #%% def load_data(when = 0, yesterday=True): df = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) current_date", "# specific_date_df[\"log10_per10k\"] = log10_per10k # # import math # log10_per100k = [] #", "= pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) current_date = '' if yesterday: current_date = df[\"date\"][df.shape[0] -", "covid # copy_df = copy_df[copy_df['log10_per100k'] != 0] # # Per county geojson #", "* (count/pop)) specific_date_df[\"per10k\"] = per10k # print(specific_date_df) # print(per10k) # import math log10_per10k", "for index, row in pop_counties.iterrows(): # # if row[\"Geographic Area\"] == \"District of", "= current_date, df = df) fig = counties_heat_map(specific_date_df, new_date) # states_heat_map(specific_date_df): # fig.write_image(\"images_counties/\"+new_date+\"_county_per100k.png\")", "i%50 == 0 and new_date != old_date: old_date = new_date new_date = dfmain[\"date\"][dfmain.shape[0]", "zip(pop_county, pop_state): # if s_county == p_county and s_state == p_state: # population_counties_list.append(population_per_county[pop_county.index(p_county)])", 
"count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1: per10k.append(1) else: per10k.append(10000 * (count/pop))", "print(copy_new_df.index[index]) # specific_date_Georgia_df.loc[index_counter] = specific_date_df.iloc[index] # index_counter += 1 # # print(index_counter) #", "has all data for current date with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response)", "pop_state): # if s_county == p_county and s_state == p_state: # population_counties_list.append(population_per_county[pop_county.index(p_county)]) #", "!= old_date: old_date = new_date new_date = dfmain[\"date\"][dfmain.shape[0] - 1] # if yesterday", "rows x 6 columns] dfmain.shape[0] old_date = '' for i in range(dfmain.shape[0]): new_date", "response: counties = json.load(response) # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # print((counties[\"features\"][0])) #3221 # per county fig", "Area\"].map(lambda x: x.lstrip('. 
,').rstrip('aAbBcC')) # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('.', '') # pop_counties[\"Geographic", "- 1]) # print(dfmain[\"date\"][1]) # current_date = dfmain[\"date\"][dfmain.shape[0] - 1] # 6/29/2020, or", "= df[\"date\"][when] return df, current_date def make_df_for_date(input_date, df): specific_date_df = pd.DataFrame(data=None, columns=list(df.columns.values)) for", "= '<br><br>Covid-19 Spread Per 100k Population Per County<br>Using 2019 Census Estimations<br>'+date # )", "log10_per10k = [] # for item in per10k: # # print(item) # log10_per10k.append(math.log10(item))", "Columbia\": # # print(\"trouble maker\") # # print(counties) # counties = \"District of", "* (count/pop)) specific_date_df[\"cases_per100k\"] = per100k # print(specific_date_df) # print(per100k) per10k = [] for", "w in fips_states_keys] # these are in the data from kaggle but not", "# for index, row in pop_counties.iterrows(): # one_state = '' # for state", "log10_per10k # import math log10_per100k = [] for item in per100k: # print(item)", "per10k.append(1) else: per10k.append(10000 * (count/pop)) specific_date_df[\"per10k\"] = per10k # print(specific_date_df) # print(per10k) #", "featureidkey = \"id\", # hover_name = \"county\", # scope=\"usa\", # ) # fig.update_layout(margin={\"r\":5,\"t\":5,\"l\":5,\"b\":5},", "pop_counties[\"state\"].replace(state, state_id) # specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia # pop_counties[\"Geographic Area\"] =", "current_date = df[\"date\"][when] return df, current_date def make_df_for_date(input_date, df): specific_date_df = pd.DataFrame(data=None, columns=list(df.columns.values))", "# print(specific_date_df) # # print(per10k) # # import math # log10_per10k = []", "for item in per10k: # # print(item) # log10_per10k.append(math.log10(item)) # specific_date_df[\"log10_per10k\"] = log10_per10k", "# # print(\"trouble maker\") # # print(counties) # counties = 
\"District of Columbia\"", "as px import plotly from plotly.offline import plot import os import math if", "print(fips_states) fips_states_keys = list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia') for w", "specific_date_df = specific_date_df.copy() IFR_list = [] for index, row in specific_date_df.iterrows(): if row[\"cases\"]", "# for item in pop_counties[\"Geographic Area\"]: # # if \"Virginia\" in item: #", "# print(len(county_id)) # print(len(state_id)) # print(len(fips_county_ids)) specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia spec_fips", "os.path.join(THIS_FOLDER, 'population_counties_2019.xlsx') # pop_counties = pd.read_excel(open(my_file, 'rb'), index_col=None, sep='\\t') # # print(pop_counties) #", "else: per100k.append(100000 * (count/pop)) specific_date_df[\"cases_per100k\"] = per100k # print(specific_date_df) # print(per100k) per10k =", "== state: pop_list.append(row[\"Population\"]) states_only_df[\"state_pop\"] = pop_list # print(pop_list) # print(len(pop_list)) per100k = []", "print(\"sure\") #yes # # print(specific_date_df) # for state, state_id in zip(fips_states_keys, fips_states_values): #", "Area\"]: # if row[\"Geographic Area\"].find(state) > 1: # one_state = state # #", "date # 3067 x 6 # specific_date_df = specific_date_df.copy() IFR_list = [] for", "id_: # print(id_) total += row[\"cases\"] list_state_count.append(total) # break print(list_state_count) print(len(list_state_count)) states_only_df[\"per_state_count\"] =", "autosize = False, width = 800, height = 650 ) # fig.show() #", "# log10_per100k = [] # for item in per100k: # # print(item) #", "# color_continuous_scale=\"ice\", # color_continuous_scale=\"thermal\", color_continuous_scale=[[0.0,'rgb(0,0,200)'], [0.3, 'rgb(149,207,216)'], [0.5, 'rgb(234,252,258)'], [0.6, 'rgb(255,210,0)'], [1.0, 'rgb(200,0,0)']],", "'\\n' + content) f.write('\\n{% endblock %}') #%% def 
load_data(when = 0, yesterday=True): df", "< 10: county_id[n] = \"00\"+str(c_id) elif c_id < 100: county_id[n] = \"0\"+str(c_id) else:", "# print(specific_date_df) specific_date_df = specific_date_df.reset_index(drop=True) # specific_date_Georgia_df = pd.DataFrame(data=None, columns=list(specific_date_df.columns.values)) # print(specific_date_df) #", "enumerate(county_id): if c_id < 10: county_id[n] = \"00\"+str(c_id) elif c_id < 100: county_id[n]", "# #%% # from PIL import Image, ImageDraw # import PIL # import", "# for filename in os.listdir(directory): # # print(\"hi\") # f = Image.open('C:/Users/karas/.spyder-py3/coronavirus/images_counties/'+filename) #", "df, current_date = load_data(when = i, yesterday=True) # current_date = new_date specific_date_df =", "locations='state_id', color='per100k', color_continuous_scale=\"Viridis\", # range_color=(0, 10), # locationmode = 'USA-states', featureidkey = \"properties.STATE\",", "Area\"] = pop_counties[\"Geographic Area\"].str.replace('.', '') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(',', '') #", "this case Georgia # state = \"Georgia\" # index_counter = 0 # for", "Estimations<br>'+date, titlefont = {\"size\": 15, \"color\":\"White\"}, paper_bgcolor='#4E5D6C', plot_bgcolor='#4E5D6C', geo=dict(bgcolor= 'rgba(0,0,0,0)', lakecolor='#4E5D6C'), font =", "= f.read() f.seek(0, 0) f.write(line.rstrip('\\r\\n') + '\\n' + content) f.write('\\n{% endblock %}') #%%", "# print(value) # # for item in pop_counties[\"Geographic Area\"]: # # if \"Virginia\"", "specific_date_df[\"per10k\"] = per10k # print(specific_date_df) # print(per10k) # import math log10_per10k = []", "= pop_counties[\"state\"].replace(state, state_id) # specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia # pop_counties[\"Geographic Area\"]", "# 3067 x 6 # specific_date_df = specific_date_df.copy() IFR_list = [] for index,", "if \"District \" in value: # # print(value) # 
# for item in", "fips_county_ids: if spec_fips == fips_census: population_counties_list.append(population_per_county[fips_county_ids.index(fips_census)]) boo = False # counter += 1", "# print(\"sure\") #yes # # print(specific_date_df) # for state, state_id in zip(fips_states_keys, fips_states_values):", "Columbia\" # if one_state in row[\"Geographic Area\"]: # states_col_for_county_pop.append(one_state) # # print(len(states_col_for_county_pop)) #", "for i,row in pop_states.iterrows(): if row[\"State\"] == state: pop_list.append(row[\"Population\"]) states_only_df[\"state_pop\"] = pop_list #", "from covid # copy_df = copy_df[copy_df['log10_per100k'] != 0] # # Per county geojson", "but not in the geojson specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Northern Mariana Islands\"] specific_date_df", "# pop_counties[\"state\"] = states_col_for_county_pop # # print(pop_counties) # counties_list = [] # for", "# # print(pop_counties[\"Geographic Area\"]) # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].map(lambda x: x.lstrip('.", "# print(dfmain[\"date\"][dfmain.shape[0] - 1]) # print(dfmain[\"date\"][1]) # current_date = dfmain[\"date\"][dfmain.shape[0] - 1] #", "df[\"date\"][10] # 6/29/2020 #%% def line_prepender(filename, line): with open(filename, 'r+') as f: content", "# per state fig = px.choropleth(states_only_df, geojson=states_mapping, locations='state_id', color='per100k', color_continuous_scale=\"Viridis\", # range_color=(0, 10),", "# boo = False # # counter += 1 # if boo ==", "print(len(pop_list)) per100k = [] for pop, count in zip(states_only_df[\"state_pop\"], states_only_df[\"per_state_count\"]): per100k.append(100000 * (count/pop))", "urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response) fips_states_keys = list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys", 
"fips_states_keys] # these are in the data from kaggle but not in the", "# if state == \"District of Columbia\": # # print(\"aye\") # # print(one_state)", "value: # # print(value) # # for item in pop_counties[\"Geographic Area\"]: # #", "index, row in specific_date_df.iterrows(): # # print(index) # if row[\"state\"] == state: #", "pop_counties[\"state\"] = pop_counties[\"state\"].replace(state, state_id) # specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia # pop_counties[\"Geographic", "with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response) # print(fips_states) fips_states_keys = list(fips_states.keys()) fips_states_values", "Area\"] = pop_counties[\"Geographic Area\"].str.replace(',', '') # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('District of", "if not os.path.exists(\"images_counties\"): os.mkdir(\"images_counties\") with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response) fips_states_keys =", "= list(pop_counties[2019]) # population_counties_list = [] # # counter = 0 # for", "pandas as pd import plotly.express as px import plotly from plotly.offline import plot", "'USA-states', featureidkey = \"id\", hover_name = \"county_and_state\", hover_data = [\"county_population\", \"cases\", \"cases_per100k\", \"cases_per_log10_per100k\",", "specific_date_df['state'].replace(state, state_id) # print(specific_date_df) specific_date_df[\"state_name\"] = specific_date_df[\"state\"] for state, state_id in zip(fips_states_keys, fips_states_values):", "10: county_id[n] = \"00\"+str(c_id) elif c_id < 100: county_id[n] = \"0\"+str(c_id) else: county_id[n]", "= per100k # # print(specific_date_df) # # print(per100k) # per10k = [] #", 
"pop_list.append(row[\"Population\"]) states_only_df[\"state_pop\"] = pop_list # print(pop_list) # print(len(pop_list)) per100k = [] for pop,", "in per100k: # print(item) log10_per100k.append(math.log10(item)) specific_date_df[\"cases_per_log10_per100k\"] = log10_per100k copy_df = specific_date_df.copy() # this", "for value in pop_counties[\"Geographic Area\"]: # # if \"District \" in value: #", "# spec_state = list(specific_date_df[\"state\"]) # pop_county = list(pop_counties[\"Geographic Area\"]) # pop_state = list(pop_counties[\"state\"])", "math if not os.path.exists(\"images_counties\"): os.mkdir(\"images_counties\") with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response) fips_states_keys", "specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Northern Mariana Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Virgin Islands\"]", "item in pop_counties[\"Geographic Area\"]: # # if \"Virginia\" in item: # # print(item)", "import plotly.express as px import plotly from plotly.offline import plot import os import", "# DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '') # DistrictOfColumbia #", "state_id): fips_county_ids.append(s + c) # print(fips_county_ids[1]) # print(len(county_id)) # print(len(state_id)) # print(len(fips_county_ids)) specific_date_df[\"county\"]", "index, row in specific_date_df.iterrows(): if row[\"state\"] == id_: # print(id_) total += row[\"cases\"]", "# # print((counties[\"features\"][0])) #3221 # # per county # fig = px.choropleth(copy_df, geojson=counties,", "\"cases\", \"cases_per100k\", \"cases_per_log10_per100k\", \"deaths\", \"IFR\"], scope=\"usa\", labels = {'cases_per_log10_per100k': 'log(cases/100k)'} ) fig.update_layout(margin={\"r\":5,\"t\":20,\"l\":5,\"b\":5}, title_text", "Area\"] = 
pop_counties[\"Geographic Area\"].str.replace(' ', '') # # print(pop_counties) # # for value", "row[\"date\"] == input_date: specific_date_df.loc[df.index[index]] = df.iloc[index] # print(specific_date_df) # has all data for", "county_and_state return specific_date_df #%% def states_heat_map(specific_date_df): \"for showing data per state\" states_only_df =", "# population_counties_list = [] # # counter = 0 # for s_county, s_state", "[] for n, c_id in enumerate(county_id): if c_id < 10: county_id[n] = \"00\"+str(c_id)", "if \"Virginia\" in item: # # print(item) # # print(pop_counties.shape) # states_col_for_county_pop =", "current_date = new_date specific_date_df = make_df_for_date(input_date = current_date, df = df) fig =", "* (count/pop)) # specific_date_df[\"per100k\"] = per100k # # print(specific_date_df) # # print(per100k) #", "yesterday = True # new_date = '2020-06-30' print(\"Date: \", new_date) # df, current_date", "{\"size\": 15, \"color\":\"White\"}, paper_bgcolor='#4E5D6C', plot_bgcolor='#4E5D6C', geo=dict(bgcolor= 'rgba(0,0,0,0)', lakecolor='#4E5D6C'), font = {\"size\": 14, \"color\":\"White\"},", "#Graveyard # def counties_heat_map(specific_date_df, date): # \"for showing data per county\" # my_file", "Cases Per 100k Population Per<br>County Using 2019 Census Estimations<br>'+date, titlefont = {\"size\": 15,", "Columbia District of Columbia', 'District of Columbia') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('", "# if \"Virginia\" in item: # # print(item) # # print(pop_counties.shape) # states_col_for_county_pop", "state) county_and_state = [] for index, row in specific_date_df.iterrows(): c_and_s = row[\"county\"] +\",", "\"properties.STATE\", hover_name = \"state_name\", scope=\"usa\", labels={'per100k':'cases per 100k'} ) fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0}) fig.show() plot(fig) return", "for index, row in pop_counties.iterrows(): # # if row[\"state\"] in row[\"Geographic Area\"]: #", 
"= states_col_for_county_pop # # print(pop_counties) # counties_list = [] # for index, row", "#%% # from PIL import Image, ImageDraw # import PIL # import os", "= True # for p_county, p_state in zip(pop_county, pop_state): # if s_county ==", "= row[\"Geographic Area\"].replace(state, '') # if state == \"District of Columbia\": # #", "15:54:28 2020 https://plotly.com/python/county-choropleth/?fbclid=IwAR1xOTSniBA_d1okZ-xEOa8eEeapK8AFTgWILshAnEvfLgJQPAhHgsVCIBE https://www.kaggle.com/fireballbyedimyrnmom/us-counties-covid-19-dataset better census data https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/ \"\"\" from urllib.request import urlopen", "data for current date # 3067 x 6 # specific_date_df = specific_date_df.copy() IFR_list", "spec_fips == fips_census: population_counties_list.append(population_per_county[fips_county_ids.index(fips_census)]) boo = False # counter += 1 if boo", "\"District of Columbia\": # # print(\"aye\") # # print(one_state) # one_state = \"District", "print(specific_date_Georgia_df) # # for picking out a specific state, in this case Georgia", "coding: utf-8 -*- \"\"\" Created on Mon Jun 29 15:54:28 2020 https://plotly.com/python/county-choropleth/?fbclid=IwAR1xOTSniBA_d1okZ-xEOa8eEeapK8AFTgWILshAnEvfLgJQPAhHgsVCIBE https://www.kaggle.com/fireballbyedimyrnmom/us-counties-covid-19-dataset", "import PIL # import os # images = [] # directory = 'C:/Users/karas/.spyder-py3/coronavirus/images_counties'", "\"__main__\": main() # #%% # from PIL import Image, ImageDraw # import PIL", "# counties = \"District of Columbia\" # counties_list.append(counties) # # for index, row", "dtype={\"fips\": str}) # print(dfmain.head) # print(dfmain.shape) # print(dfmain[\"date\"][dfmain.shape[0] - 1]) # print(dfmain[\"date\"][1]) #", "spec_fips in spec_fips: boo = True for fips_census in fips_county_ids: if spec_fips ==", "log10_per10k.append(math.log10(item)) # specific_date_df[\"log10_per10k\"] = log10_per10k # # import math # 
log10_per100k = []", "# plot(fig) return fig #%% def main(): #[282519 rows x 6 columns] dfmain.shape[0]", "make_df_for_date(input_date = current_date, df = df) fig = counties_heat_map(specific_date_df, new_date) # states_heat_map(specific_date_df): #", "# print(pop_counties) # # print(pop_counties[\"Geographic Area\"]) # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].map(lambda", "f = f.save(filename) # images.append(f) # print(len(images)) # images[0].save('covid_timeline_county_cases.gif', # save_all=True, append_images=images[1:], optimize=False,", "hover_name = \"state_name\", scope=\"usa\", labels={'per100k':'cases per 100k'} ) fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0}) fig.show() plot(fig) return fig", "state: # # print(\"yes\") # # print(index) # # print(copy_new_df.index[index]) # specific_date_Georgia_df.loc[index_counter] =", "# print(population_counties_list) # specific_date_df[\"county_population\"] = population_counties_list # # print(specific_date_df) # per100k = []", "# # print(\"oh yeah\") # # row[\"Geographic Area\"].replace(row[\"state\"], '') # # break #", "specific_date_df.iterrows(): # # print(index) # if row[\"state\"] == state: # # print(\"yes\") #", "of Columbia District of Columbia', 'District of Columbia') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic", "'orgia') for w in fips_states_keys] # these are in the data from kaggle", "per100k: # # print(item) # log10_per100k.append(math.log10(item)) # specific_date_df[\"log10_per100k\"] = log10_per100k # copy_df =", "is missing from covid copy_df = copy_df[copy_df['cases_per_log10_per100k'] != 0] # Per county geojson", "per10k = [] # for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop", "+ '\\n' + content) f.write('\\n{% endblock %}') #%% def load_data(when = 0, yesterday=True):", "print(per100k) per10k = [] for pop, count in zip(specific_date_df[\"county_population\"], 
specific_date_df[\"cases\"]): if pop ==", "\" in value: # # print(value) # # for item in pop_counties[\"Geographic Area\"]:", "(count/pop)) specific_date_df[\"per10k\"] = per10k # print(specific_date_df) # print(per10k) # import math log10_per10k =", "# counter += 1 # if boo == True: # population_counties_list.append(1) # #", "# DistrictOfColumbia spec_fips = list(specific_date_df[\"fips\"]) odd_balls = [] # unknown county cases population_counties_list", "'') # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '') # # print(pop_counties)", "= Image.open('C:/Users/karas/.spyder-py3/coronavirus/images_counties/'+filename) # # f = f.save(filename) # images.append(f) # print(len(images)) # images[0].save('covid_timeline_county_cases.gif',", "# 3065 # # print(population_counties_list) # specific_date_df[\"county_population\"] = population_counties_list # # print(specific_date_df) #", "import urlopen import json import pandas as pd import plotly.express as px import", "# print(item) # # print(pop_counties.shape) # states_col_for_county_pop = [] # for index, row", "= pop_counties[\"Geographic Area\"].str.replace(' County', '') # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ',", "[] for item in per10k: # print(item) log10_per10k.append(math.log10(item)) specific_date_df[\"log10_per10k\"] = log10_per10k # import", "in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1: per100k.append(1) else: per100k.append(100000 * (count/pop)) specific_date_df[\"cases_per100k\"]", "row in pop_counties.iterrows(): # one_state = '' # for state in fips_states_keys: #", "print(specific_date_df) # for state, state_id in zip(fips_states_keys, fips_states_values): # pop_counties[\"state\"] = pop_counties[\"state\"].replace(state, state_id)", "[] # unknown county cases population_counties_list = [] # counter = 0 for", "< 10: state_id[n] = \"0\"+str(s_id) else: state_id[n] = 
str(s_id) # print(county_id[57]) # print(state_id[600])", "county_and_state = [] for index, row in specific_date_df.iterrows(): c_and_s = row[\"county\"] +\", \"+", "specific_date_df[\"log10_per100k\"] = log10_per100k # copy_df = specific_date_df.copy() # this is to remove data", "= IFR_list # print(specific_date_df) specific_date_df = specific_date_df.reset_index(drop=True) # specific_date_Georgia_df = pd.DataFrame(data=None, columns=list(specific_date_df.columns.values)) #", "color='per100k', color_continuous_scale=\"Viridis\", # range_color=(0, 10), # locationmode = 'USA-states', featureidkey = \"properties.STATE\", hover_name", "if row[\"Geographic Area\"].find(state) > 1: # counties = row[\"Geographic Area\"].replace(state, '') # if", "index, row in pop_counties.iterrows(): # # if row[\"Geographic Area\"] == \"District of Columbia\":", "print(pop_states[\"State\"]) # print(states_only_df) pop_list = [] for state in states_only_df[\"state_name\"]: for i,row in", "# print(one_state) # one_state = \"District of Columbia\" # if one_state in row[\"Geographic", "geojson=states_mapping, locations='state_id', color='per100k', color_continuous_scale=\"Viridis\", # range_color=(0, 10), # locationmode = 'USA-states', featureidkey =", "'population_states_2019.txt') pop_states = pd.read_csv(my_file, header=0) # print(pop_states[\"State\"]) # print(states_only_df) pop_list = [] for", "# fig.update_layout(margin={\"r\":5,\"t\":5,\"l\":5,\"b\":5}, # title_text = '<br><br>Covid-19 Spread Per 100k Population Per County<br>Using 2019", "counties_list = [] # for index, row in pop_counties.iterrows(): # for state in", "old_date: old_date = new_date new_date = dfmain[\"date\"][dfmain.shape[0] - 1] # if yesterday =", "= specific_date_df[specific_date_df[\"state\"] != \"Virgin Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Puerto Rico\"] specific_date_df =", "# Per county geojson with 
urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties = json.load(response) # print(counties[\"features\"][0][\"properties\"][\"STATE\"])", "row[\"Geographic Area\"].find(state) > 1: # counties = row[\"Geographic Area\"].replace(state, '') # if state", "the data from kaggle but not in the geojson specific_date_df = specific_date_df[specific_date_df[\"state\"] !=", "images[0].save('covid_timeline_county_cases.gif', # save_all=True, append_images=images[1:], optimize=False, duration=500, loop=0) #%% #Graveyard # def counties_heat_map(specific_date_df, date):", "# if pop == 1: # per100k.append(1) # else: # per100k.append(100000 * (count/pop))", "color='log10_per100k', # # color_continuous_scale=\"Reds\", # color_continuous_scale=\"Viridis\", # range_color=(0, 5), # # locationmode =", "6/29/2020 #%% def line_prepender(filename, line): with open(filename, 'r+') as f: content = f.read()", "# # print(pop_counties) # # print(pop_counties[\"Geographic Area\"]) # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic", "state, state_id in zip(fips_states_keys, fips_states_values): # pop_counties[\"state\"] = pop_counties[\"state\"].replace(state, state_id) # specific_date_df[\"county\"] =", "log10_per100k # copy_df = specific_date_df.copy() # this is to remove data from census", "[0.3, 'rgb(149,207,216)'], [0.5, 'rgb(234,252,258)'], [0.6, 'rgb(255,210,0)'], [1.0, 'rgb(200,0,0)']], range_color=(0, 5), # locationmode =", "1 # # print(index_counter) # print(specific_date_Georgia_df) # has all data for current date", "height = 650 ) # fig.show() # plot(fig,filename='covid_counties_'+date+'.html') plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html') # plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/'+date+'_counties.html') # plot(fig)", "'rgba(0,0,0,0)', lakecolor='#4E5D6C'), font = {\"size\": 14, 
\"color\":\"White\"}, autosize = False, width = 800,", "'rgb(255,210,0)'], [1.0, 'rgb(200,0,0)']], range_color=(0, 5), # locationmode = 'USA-states', featureidkey = \"id\", hover_name", "current_date = load_data(when = i, yesterday=True) # current_date = new_date specific_date_df = make_df_for_date(input_date", "optimize=False, duration=500, loop=0) #%% #Graveyard # def counties_heat_map(specific_date_df, date): # \"for showing data", "population_counties_list.append(1) # # print(len(population_counties_list)) # 3065 # # print(population_counties_list) # specific_date_df[\"county_population\"] = population_counties_list", "specific_date_df.iloc[index] # index_counter += 1 # # print(index_counter) # print(specific_date_Georgia_df) # has all", "html_header) break #%% if __name__ == \"__main__\": main() # #%% # from PIL", "list(pop_counties[\"Geographic Area\"]) # pop_state = list(pop_counties[\"state\"]) # population_per_county = list(pop_counties[2019]) # population_counties_list =", "# color_continuous_scale=\"Reds\", # color_continuous_scale=\"Viridis\", # range_color=(0, 5), # # locationmode = 'USA-states', #", "zip(fips_states_keys, fips_states_values): specific_date_df['state_name'] = specific_date_df['state_name'].replace(state_id, state) county_and_state = [] for index, row in", "# 6/29/2020, or yesterday # current_date = df[\"date\"][10] # 6/29/2020 #%% def line_prepender(filename,", "scope=\"usa\", labels = {'cases_per_log10_per100k': 'log(cases/100k)'} ) fig.update_layout(margin={\"r\":5,\"t\":20,\"l\":5,\"b\":5}, title_text = '<br><br>Covid-19 Total Cases Per", "= \"properties.STATE\", hover_name = \"state_name\", scope=\"usa\", labels={'per100k':'cases per 100k'} ) fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0}) fig.show() plot(fig)", "row[\"Geographic Area\"]: # states_col_for_county_pop.append(one_state) # # print(len(states_col_for_county_pop)) # # print(states_col_for_county_pop) # pop_counties[\"state\"] =", "current_date def 
make_df_for_date(input_date, df): specific_date_df = pd.DataFrame(data=None, columns=list(df.columns.values)) for index, row in df.iterrows():", "def counties_heat_map(specific_date_df, date): # \"for showing data per county\" # my_file = os.path.join(THIS_FOLDER,", "os # images = [] # directory = 'C:/Users/karas/.spyder-py3/coronavirus/images_counties' # for filename in", "= \"District of Columbia\" # if one_state in row[\"Geographic Area\"]: # states_col_for_county_pop.append(one_state) #", "break #%% if __name__ == \"__main__\": main() # #%% # from PIL import", "# for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop == 1: #", "states_heat_map(specific_date_df): # fig.write_image(\"images_counties/\"+new_date+\"_county_per100k.png\") fig.write_image(\"C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/pages/static/current_counties.png\") html_header = \"\"\" {% extends 'base.html' %} {% block", "\"for showing data per state\" states_only_df = pd.DataFrame() list_state_count = [] list_str_states =", "state_id[n] = str(s_id) # print(county_id[57]) # print(state_id[600]) for c,s in zip(county_id, state_id): fips_county_ids.append(s", "fips_states_keys = [w.replace('ogia', 'orgia') for w in fips_states_keys] THIS_FOLDER = os.path.dirname(os.path.abspath(__file__)) dfmain =", "main() # #%% # from PIL import Image, ImageDraw # import PIL #", "District of Columbia', 'District of Columbia') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' County',", "print(spec_fips) # print(len(population_counties_list)) # 3065 # print(population_counties_list) specific_date_df[\"county_population\"] = population_counties_list per100k = []", "= specific_date_df['state'].replace(state, state_id) # print(specific_date_df) specific_date_df[\"state_name\"] = specific_date_df[\"state\"] for state, state_id in zip(fips_states_keys,", "county_id = list(pop_counties[\"COUNTY\"]) state_id = 
list(pop_counties[\"STATE\"]) population_per_county = list(pop_counties[\"POPESTIMATE2019\"]) fips_county_ids = [] for", "# for index, row in specific_date_df.iterrows(): # # print(index) # if row[\"state\"] ==", "labels = {'cases_per_log10_per100k': 'log(cases/100k)'} ) fig.update_layout(margin={\"r\":5,\"t\":20,\"l\":5,\"b\":5}, title_text = '<br><br>Covid-19 Total Cases Per 100k", "log10_per10k # # import math # log10_per100k = [] # for item in", "in row[\"Geographic Area\"]: # if row[\"Geographic Area\"].find(state) > 1: # counties = row[\"Geographic", "# # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # # print((counties[\"features\"][0])) #3221 # # per county # fig", "f.seek(0, 0) f.write(line.rstrip('\\r\\n') + '\\n' + content) f.write('\\n{% endblock %}') #%% def load_data(when", "as f: content = f.read() f.seek(0, 0) f.write(line.rstrip('\\r\\n') + '\\n' + content) f.write('\\n{%", "\"0\"+str(c_id) else: county_id[n] = str(c_id) for n, s_id in enumerate(state_id): if s_id <", "15, \"color\":\"White\"}, paper_bgcolor='#4E5D6C', plot_bgcolor='#4E5D6C', geo=dict(bgcolor= 'rgba(0,0,0,0)', lakecolor='#4E5D6C'), font = {\"size\": 14, \"color\":\"White\"}, autosize", "\"District of Columbia\" # if one_state in row[\"Geographic Area\"]: # states_col_for_county_pop.append(one_state) # #", "color_continuous_scale=[[0.0,'rgb(0,0,200)'], [0.3, 'rgb(149,207,216)'], [0.5, 'rgb(234,252,258)'], [0.6, 'rgb(255,210,0)'], [1.0, 'rgb(200,0,0)']], range_color=(0, 5), # locationmode", "else: per10k.append(10000 * (count/pop)) specific_date_df[\"per10k\"] = per10k # print(specific_date_df) # print(per10k) # import", "# locationmode = 'USA-states', # featureidkey = \"id\", # hover_name = \"county\", #", "# # print(len(states_col_for_county_pop)) # # print(states_col_for_county_pop) # pop_counties[\"state\"] = states_col_for_county_pop # # print(pop_counties)", "in specific_date_df.iterrows(): # # print(index) # if row[\"state\"] == state: # # print(\"yes\")", "= 
per10k # print(specific_date_df) # print(per10k) # import math log10_per10k = [] for", "== 1: # per100k.append(1) # else: # per100k.append(100000 * (count/pop)) # specific_date_df[\"per100k\"] =", "index, row in pop_counties.iterrows(): # one_state = '' # for state in fips_states_keys:", "print(population_counties_list) specific_date_df[\"county_population\"] = population_counties_list per100k = [] for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]):", "= make_df_for_date(input_date = current_date, df = df) fig = counties_heat_map(specific_date_df, new_date) # states_heat_map(specific_date_df):", "df, current_date = load_data(when = i, yesterday=False) df, current_date = load_data(when = i,", "= False # counter += 1 if boo == True: population_counties_list.append(1) odd_balls.append(spec_fips) #", "specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Puerto Rico\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Guam\"] for state,", "= dfmain[\"date\"][i] if i%50 == 0 and new_date != old_date: old_date = new_date", "\"\"\" {% extends 'base.html' %} {% block content %} <body style=\"background-color:black;color:white;\"> \"\"\" line_prepender('C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html',", "Population Per County<br>Using 2019 Census Estimations<br>'+date # ) # # fig.show() # #", "states_col_for_county_pop.append(one_state) # # print(len(states_col_for_county_pop)) # # print(states_col_for_county_pop) # pop_counties[\"state\"] = states_col_for_county_pop # #", "date): \"for showing data per county\" my_file = os.path.join(THIS_FOLDER, 'all_census_data.csv') pop_counties = pd.read_csv(open(my_file))", "per10k.append(10000 * (count/pop)) specific_date_df[\"per10k\"] = per10k # print(specific_date_df) # print(per10k) # import math", "if pop == 1: # per100k.append(1) # else: # per100k.append(100000 * (count/pop)) #", "print(dfmain.shape) # 
print(dfmain[\"date\"][dfmain.shape[0] - 1]) # print(dfmain[\"date\"][1]) # current_date = dfmain[\"date\"][dfmain.shape[0] - 1]", "'orgia') for w in fips_states_keys] THIS_FOLDER = os.path.dirname(os.path.abspath(__file__)) dfmain = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str})", "= \"county\", # scope=\"usa\", # ) # fig.update_layout(margin={\"r\":5,\"t\":5,\"l\":5,\"b\":5}, # title_text = '<br><br>Covid-19 Spread", "specific_date_df[specific_date_df[\"state\"] != \"Virgin Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Puerto Rico\"] specific_date_df = specific_date_df[specific_date_df[\"state\"]", "data for current date with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response) # print(fips_states)", "# pop_counties = pd.read_excel(open(my_file, 'rb'), index_col=None, sep='\\t') # # print(pop_counties) # # print(pop_counties[\"Geographic", "pd.read_excel(open(my_file, 'rb'), index_col=None, sep='\\t') # # print(pop_counties) # # print(pop_counties[\"Geographic Area\"]) # #", "\"Virginia\" in item: # # print(item) # # print(pop_counties.shape) # states_col_for_county_pop = []", "i in range(dfmain.shape[0]): new_date = dfmain[\"date\"][i] if i%50 == 0 and new_date !=", "pop_counties[\"Geographic Area\"].str.replace(' ', '') # DistrictOfColumbia # spec_county = list(specific_date_df[\"county\"]) # spec_state =", "True for fips_census in fips_county_ids: if spec_fips == fips_census: population_counties_list.append(population_per_county[fips_county_ids.index(fips_census)]) boo = False", "= json.load(response) # print(fips_states) fips_states_keys = list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys = [w.replace('ogia',", "new_date = dfmain[\"date\"][i] if i%50 == 0 and new_date != old_date: old_date =", "= 
specific_date_df[specific_date_df[\"state\"] != \"Puerto Rico\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Guam\"] for state, state_id", "# print(counties) # counties = \"District of Columbia\" # counties_list.append(counties) # # for", "cases population_counties_list = [] # counter = 0 for spec_fips in spec_fips: boo", "print(population_counties_list) # specific_date_df[\"county_population\"] = population_counties_list # # print(specific_date_df) # per100k = [] #", "specific_date_df[\"IFR\"] = IFR_list # print(specific_date_df) specific_date_df = specific_date_df.reset_index(drop=True) # specific_date_Georgia_df = pd.DataFrame(data=None, columns=list(specific_date_df.columns.values))", "per100k: # print(item) log10_per100k.append(math.log10(item)) specific_date_df[\"cases_per_log10_per100k\"] = log10_per100k copy_df = specific_date_df.copy() # this is", "# pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '') # DistrictOfColumbia # spec_county =", "specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia spec_fips = list(specific_date_df[\"fips\"]) odd_balls = [] #", "Image.open('C:/Users/karas/.spyder-py3/coronavirus/images_counties/'+filename) # # f = f.save(filename) # images.append(f) # print(len(images)) # images[0].save('covid_timeline_county_cases.gif', #", "Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Puerto Rico\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Guam\"] for", "import plotly from plotly.offline import plot import os import math if not os.path.exists(\"images_counties\"):", "1 # if boo == True: # population_counties_list.append(1) # # print(len(population_counties_list)) # 3065", "== p_state: # population_counties_list.append(population_per_county[pop_county.index(p_county)]) # boo = False # # counter += 1", "str(s_id) # print(county_id[57]) # print(state_id[600]) for c,s in 
zip(county_id, state_id): fips_county_ids.append(s + c)", "loop=0) #%% #Graveyard # def counties_heat_map(specific_date_df, date): # \"for showing data per county\"", "urlopen import json import pandas as pd import plotly.express as px import plotly", "filename in os.listdir(directory): # # print(\"hi\") # f = Image.open('C:/Users/karas/.spyder-py3/coronavirus/images_counties/'+filename) # # f", "#%% def line_prepender(filename, line): with open(filename, 'r+') as f: content = f.read() f.seek(0,", "print(pop_counties.shape) # states_col_for_county_pop = [] # for index, row in pop_counties.iterrows(): # one_state", "== \"District of Columbia\": # # print(\"trouble maker\") # # print(counties) # counties", "1] # if yesterday = True # new_date = '2020-06-30' print(\"Date: \", new_date)", "print(dfmain[\"date\"][dfmain.shape[0] - 1]) # print(dfmain[\"date\"][1]) # current_date = dfmain[\"date\"][dfmain.shape[0] - 1] # 6/29/2020,", "__name__ == \"__main__\": main() # #%% # from PIL import Image, ImageDraw #", "= px.choropleth(copy_df, geojson=counties, # locations='fips', # color='log10_per100k', # # color_continuous_scale=\"Reds\", # color_continuous_scale=\"Viridis\", #", "# counter = 0 for spec_fips in spec_fips: boo = True for fips_census", "# images = [] # directory = 'C:/Users/karas/.spyder-py3/coronavirus/images_counties' # for filename in os.listdir(directory):", "IFR_list = [] for index, row in specific_date_df.iterrows(): if row[\"cases\"] > 0: IFR", "# locations='fips', # color='log10_per100k', # # color_continuous_scale=\"Reds\", # color_continuous_scale=\"Viridis\", # range_color=(0, 5), #", "# log10_per10k.append(math.log10(item)) # specific_date_df[\"log10_per10k\"] = log10_per10k # # import math # log10_per100k =", "if boo == True: population_counties_list.append(1) odd_balls.append(spec_fips) # unknown county cases # print(spec_fips) #", "print(item) # log10_per100k.append(math.log10(item)) # specific_date_df[\"log10_per100k\"] = log10_per100k # copy_df 
= specific_date_df.copy() # this", "to remove data from census that is missing from covid copy_df = copy_df[copy_df['cases_per_log10_per100k']", "index_counter = 0 # for index, row in specific_date_df.iterrows(): # # print(index) #", "14, \"color\":\"White\"}, autosize = False, width = 800, height = 650 ) #", "i, yesterday=True) # current_date = new_date specific_date_df = make_df_for_date(input_date = current_date, df =", "states_only_df[\"per_state_count\"]): per100k.append(100000 * (count/pop)) states_only_df[\"per100k\"] = per100k print(states_only_df) with open('gz_2010_us_040_00_20m.json') as response: states_mapping", "pop_counties[\"Geographic Area\"].str.replace('District of Columbia District of Columbia', 'District of Columbia') # pop_counties[\"Geographic Area\"]", "0 and new_date != old_date: old_date = new_date new_date = dfmain[\"date\"][dfmain.shape[0] - 1]", "- 1] # if yesterday = True # new_date = '2020-06-30' print(\"Date: \",", "[] for state in states_only_df[\"state_name\"]: for i,row in pop_states.iterrows(): if row[\"State\"] == state:", "= per100k # print(specific_date_df) # print(per100k) per10k = [] for pop, count in", "specific_date_df[\"cases\"]): if pop == 1: per100k.append(1) else: per100k.append(100000 * (count/pop)) specific_date_df[\"cases_per100k\"] = per100k", "# pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('District of Columbia District of Columbia', 'District of", "#%% def counties_heat_map(specific_date_df, date): \"for showing data per county\" my_file = os.path.join(THIS_FOLDER, 'all_census_data.csv')", "{% block content %} <body style=\"background-color:black;color:white;\"> \"\"\" line_prepender('C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html', html_header) break #%% if __name__", "this is to remove data from census that is missing from covid #", "= list(specific_date_df[\"state\"].unique()) # print(list_str_states) for id_ in list_str_states: 
total = 0 for index,", "# pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('Parish', '') # DistrictOfColumbia # pop_counties[\"Geographic Area\"] =", "unknown county cases # print(spec_fips) # print(len(population_counties_list)) # 3065 # print(population_counties_list) specific_date_df[\"county_population\"] =", "fips_states_values): specific_date_df['state'] = specific_date_df['state'].replace(state, state_id) # print(specific_date_df) specific_date_df[\"state_name\"] = specific_date_df[\"state\"] for state, state_id", "content = f.read() f.seek(0, 0) f.write(line.rstrip('\\r\\n') + '\\n' + content) f.write('\\n{% endblock %}')", "= [] for state in states_only_df[\"state_name\"]: for i,row in pop_states.iterrows(): if row[\"State\"] ==", "pd.DataFrame() list_state_count = [] list_str_states = list(specific_date_df[\"state\"].unique()) # print(list_str_states) for id_ in list_str_states:", "= list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia') for w in fips_states_keys]", "if s_id < 10: state_id[n] = \"0\"+str(s_id) else: state_id[n] = str(s_id) # print(county_id[57])", "row in specific_date_df.iterrows(): if row[\"state\"] == id_: # print(id_) total += row[\"cases\"] list_state_count.append(total)", "True: # population_counties_list.append(1) # # print(len(population_counties_list)) # 3065 # # print(population_counties_list) # specific_date_df[\"county_population\"]", "in specific_date_df.iterrows(): c_and_s = row[\"county\"] +\", \"+ row[\"state_name\"] county_and_state.append(c_and_s) specific_date_df[\"county_and_state\"] = county_and_state return", "'rgb(149,207,216)'], [0.5, 'rgb(234,252,258)'], [0.6, 'rgb(255,210,0)'], [1.0, 'rgb(200,0,0)']], range_color=(0, 5), # locationmode = 'USA-states',", "# color='log10_per100k', # # color_continuous_scale=\"Reds\", # color_continuous_scale=\"Viridis\", # range_color=(0, 5), # # locationmode", 
"population_counties_list.append(population_per_county[fips_county_ids.index(fips_census)]) boo = False # counter += 1 if boo == True: population_counties_list.append(1)", "== 1: # per10k.append(1) # else: # per10k.append(10000 * (count/pop)) # specific_date_df[\"per10k\"] =", "# population_counties_list.append(1) # # print(len(population_counties_list)) # 3065 # # print(population_counties_list) # specific_date_df[\"county_population\"] =", "pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) # print(dfmain.head) # print(dfmain.shape) # print(dfmain[\"date\"][dfmain.shape[0] - 1]) # print(dfmain[\"date\"][1])", "if row[\"state\"] == state: # # print(\"yes\") # # print(index) # # print(copy_new_df.index[index])", "zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1: per10k.append(1) else: per10k.append(10000 * (count/pop)) specific_date_df[\"per10k\"] =", "print((counties[\"features\"][0])) #3221 # # per county # fig = px.choropleth(copy_df, geojson=counties, # locations='fips',", "state = \"Georgia\" # index_counter = 0 # for index, row in specific_date_df.iterrows():", "<body style=\"background-color:black;color:white;\"> \"\"\" line_prepender('C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html', html_header) break #%% if __name__ == \"__main__\": main() #", "duration=500, loop=0) #%% #Graveyard # def counties_heat_map(specific_date_df, date): # \"for showing data per", "of Columbia\" # counties_list.append(counties) # # for index, row in pop_counties.iterrows(): # #", "'rgb(200,0,0)']], range_color=(0, 5), # locationmode = 'USA-states', featureidkey = \"id\", hover_name = \"county_and_state\",", "all data for current date # 3067 x 6 # specific_date_df = specific_date_df.copy()", "fig = px.choropleth(copy_df, geojson=counties, # locations='fips', # color='log10_per100k', # # color_continuous_scale=\"Reds\", # 
color_continuous_scale=\"Viridis\",", "# color_continuous_scale=\"thermal\", color_continuous_scale=[[0.0,'rgb(0,0,200)'], [0.3, 'rgb(149,207,216)'], [0.5, 'rgb(234,252,258)'], [0.6, 'rgb(255,210,0)'], [1.0, 'rgb(200,0,0)']], range_color=(0, 5),", "counties = \"District of Columbia\" # counties_list.append(counties) # # for index, row in", "counties = json.load(response) # # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # # print((counties[\"features\"][0])) #3221 # # per", "# index_counter = 0 # for index, row in specific_date_df.iterrows(): # # print(index)", "id_ in list_str_states: total = 0 for index, row in specific_date_df.iterrows(): if row[\"state\"]", "'base.html' %} {% block content %} <body style=\"background-color:black;color:white;\"> \"\"\" line_prepender('C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html', html_header) break #%%", "= list_state_count states_only_df[\"state_id\"] = list_str_states states_only_df[\"state_name\"] = fips_states_keys print(states_only_df) my_file = os.path.join(THIS_FOLDER, 'population_states_2019.txt')", "pop_counties[\"Geographic Area\"].map(lambda x: x.lstrip('. 
,').rstrip('aAbBcC')) # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('.', '') #", "IFR_list.append(0) specific_date_df[\"IFR\"] = IFR_list # print(specific_date_df) specific_date_df = specific_date_df.reset_index(drop=True) # specific_date_Georgia_df = pd.DataFrame(data=None,", "population_counties_list.append(1) odd_balls.append(spec_fips) # unknown county cases # print(spec_fips) # print(len(population_counties_list)) # 3065 #", "content %} <body style=\"background-color:black;color:white;\"> \"\"\" line_prepender('C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html', html_header) break #%% if __name__ == \"__main__\":", "0] # Per county geojson with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties = json.load(response) #", "line_prepender(filename, line): with open(filename, 'r+') as f: content = f.read() f.seek(0, 0) f.write(line.rstrip('\\r\\n')", "= specific_date_df[specific_date_df[\"state\"] != \"Guam\"] for state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state'] = specific_date_df['state'].replace(state,", "pop_counties.iterrows(): # for state in fips_states_keys: # if state in row[\"Geographic Area\"]: #", "geo=dict(bgcolor= 'rgba(0,0,0,0)', lakecolor='#4E5D6C'), font = {\"size\": 14, \"color\":\"White\"}, autosize = False, width =", "# f = f.save(filename) # images.append(f) # print(len(images)) # images[0].save('covid_timeline_county_cases.gif', # save_all=True, append_images=images[1:],", "print(value) # # for item in pop_counties[\"Geographic Area\"]: # # if \"Virginia\" in", "style=\"background-color:black;color:white;\"> \"\"\" line_prepender('C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html', html_header) break #%% if __name__ == \"__main__\": main() # #%%", "import math # log10_per10k = [] # for item in per10k: # #", "# 
DistrictOfColumbia # spec_county = list(specific_date_df[\"county\"]) # spec_state = list(specific_date_df[\"state\"]) # pop_county =", "in the data from kaggle but not in the geojson specific_date_df = specific_date_df[specific_date_df[\"state\"]", "= pop_list # print(pop_list) # print(len(pop_list)) per100k = [] for pop, count in", "# print(states_only_df) pop_list = [] for state in states_only_df[\"state_name\"]: for i,row in pop_states.iterrows():", "# for item in per100k: # # print(item) # log10_per100k.append(math.log10(item)) # specific_date_df[\"log10_per100k\"] =", "== 1: per10k.append(1) else: per10k.append(10000 * (count/pop)) specific_date_df[\"per10k\"] = per10k # print(specific_date_df) #", "\"id\", hover_name = \"county_and_state\", hover_data = [\"county_population\", \"cases\", \"cases_per100k\", \"cases_per_log10_per100k\", \"deaths\", \"IFR\"], scope=\"usa\",", "= '' # for state in fips_states_keys: # if state in row[\"Geographic Area\"]:", "in fips_states_keys] # these are in the data from kaggle but not in", "= counties_list # # print(pop_counties) # # for index, row in pop_counties.iterrows(): #", "new_date specific_date_df = make_df_for_date(input_date = current_date, df = df) fig = counties_heat_map(specific_date_df, new_date)", "# # # print(\"sure\") #yes # # print(specific_date_df) # for state, state_id in", "import pandas as pd import plotly.express as px import plotly from plotly.offline import", "Mariana Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Virgin Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Puerto", "# import math log10_per100k = [] for item in per100k: # print(item) log10_per100k.append(math.log10(item))", "per10k: # # print(item) # log10_per10k.append(math.log10(item)) # specific_date_df[\"log10_per10k\"] = log10_per10k # # import", "print(specific_date_df) # # print(per100k) # per10k = [] # for pop, count in", "plot import os import math if not 
os.path.exists(\"images_counties\"): os.mkdir(\"images_counties\") with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response:", "6/29/2020 else: current_date = df[\"date\"][when] return df, current_date def make_df_for_date(input_date, df): specific_date_df =", "(count/pop)) # specific_date_df[\"per10k\"] = per10k # # print(specific_date_df) # # print(per10k) # #", "# # Per county geojson # with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: # counties =", "specific_date_df['state_name'].replace(state_id, state) county_and_state = [] for index, row in specific_date_df.iterrows(): c_and_s = row[\"county\"]", "as response: fips_states = json.load(response) # print(fips_states) fips_states_keys = list(fips_states.keys()) fips_states_values = list(fips_states.values())", "[] for index, row in specific_date_df.iterrows(): if row[\"cases\"] > 0: IFR = row[\"deaths\"]", "= [] for index, row in specific_date_df.iterrows(): if row[\"cases\"] > 0: IFR =", "= \"0\"+str(c_id) else: county_id[n] = str(c_id) for n, s_id in enumerate(state_id): if s_id", "c,s in zip(county_id, state_id): fips_county_ids.append(s + c) # print(fips_county_ids[1]) # print(len(county_id)) # print(len(state_id))", "# state = \"Georgia\" # index_counter = 0 # for index, row in", "df.iterrows(): if row[\"date\"] == input_date: specific_date_df.loc[df.index[index]] = df.iloc[index] # print(specific_date_df) # has all", "[] # for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop == 1:", "data from kaggle but not in the geojson specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Northern", "print((counties[\"features\"][0])) #3221 # per county fig = px.choropleth(copy_df, geojson=counties, locations='fips', color='cases_per_log10_per100k', # color_continuous_scale=\"icefire\",", 
"pop_counties[\"state\"] = states_col_for_county_pop # # print(pop_counties) # counties_list = [] # for index,", "= list(pop_counties[\"COUNTY\"]) state_id = list(pop_counties[\"STATE\"]) population_per_county = list(pop_counties[\"POPESTIMATE2019\"]) fips_county_ids = [] for n,", "# if s_county == p_county and s_state == p_state: # population_counties_list.append(population_per_county[pop_county.index(p_county)]) # boo", "list(pop_counties[2019]) # population_counties_list = [] # # counter = 0 # for s_county,", "are in the data from kaggle but not in the geojson specific_date_df =", "Area\"].str.replace(' ', '') # # print(pop_counties) # # for value in pop_counties[\"Geographic Area\"]:", "if pop == 1: # per10k.append(1) # else: # per10k.append(10000 * (count/pop)) #", "row in specific_date_df.iterrows(): c_and_s = row[\"county\"] +\", \"+ row[\"state_name\"] county_and_state.append(c_and_s) specific_date_df[\"county_and_state\"] = county_and_state", "800, height = 650 ) # fig.show() # plot(fig,filename='covid_counties_'+date+'.html') plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html') # plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/'+date+'_counties.html') #", "data per state\" states_only_df = pd.DataFrame() list_state_count = [] list_str_states = list(specific_date_df[\"state\"].unique()) #", "for current date with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response) # print(fips_states) fips_states_keys", "per county\" # my_file = os.path.join(THIS_FOLDER, 'population_counties_2019.xlsx') # pop_counties = pd.read_excel(open(my_file, 'rb'), index_col=None,", "# # print(item) # log10_per100k.append(math.log10(item)) # specific_date_df[\"log10_per100k\"] = log10_per100k # copy_df = specific_date_df.copy()", "directory = 
'C:/Users/karas/.spyder-py3/coronavirus/images_counties' # for filename in os.listdir(directory): # # print(\"hi\") # f", "# # print(item) # log10_per10k.append(math.log10(item)) # specific_date_df[\"log10_per10k\"] = log10_per10k # # import math", "* (count/pop)) # specific_date_df[\"per10k\"] = per10k # # print(specific_date_df) # # print(per10k) #", "remove data from census that is missing from covid # copy_df = copy_df[copy_df['log10_per100k']", "= px.choropleth(copy_df, geojson=counties, locations='fips', color='cases_per_log10_per100k', # color_continuous_scale=\"icefire\", # winner # color_continuous_scale=\"Viridis\", # color_continuous_scale=\"hot\",", "if pop == 1: per10k.append(1) else: per10k.append(10000 * (count/pop)) specific_date_df[\"per10k\"] = per10k #", "= [\"county_population\", \"cases\", \"cases_per100k\", \"cases_per_log10_per100k\", \"deaths\", \"IFR\"], scope=\"usa\", labels = {'cases_per_log10_per100k': 'log(cases/100k)'} )", "for pop, count in zip(states_only_df[\"state_pop\"], states_only_df[\"per_state_count\"]): per100k.append(100000 * (count/pop)) states_only_df[\"per100k\"] = per100k print(states_only_df)", "c) # print(fips_county_ids[1]) # print(len(county_id)) # print(len(state_id)) # print(len(fips_county_ids)) specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '')", "fips_states = json.load(response) # print(fips_states) fips_states_keys = list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys =", "math # log10_per10k = [] # for item in per10k: # # print(item)", "row[\"cases\"] IFR_list.append(IFR) else: IFR_list.append(0) specific_date_df[\"IFR\"] = IFR_list # print(specific_date_df) specific_date_df = specific_date_df.reset_index(drop=True) #", "old_date = new_date new_date = dfmain[\"date\"][dfmain.shape[0] - 1] # if yesterday = True", "data https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/ \"\"\" from urllib.request import urlopen 
import json import pandas as pd", "import math # log10_per100k = [] # for item in per100k: # #", "not in the geojson specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Northern Mariana Islands\"] specific_date_df =", "states_heat_map(specific_date_df): \"for showing data per state\" states_only_df = pd.DataFrame() list_state_count = [] list_str_states", "# if boo == True: # population_counties_list.append(1) # # print(len(population_counties_list)) # 3065 #", "list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia') for w in fips_states_keys] THIS_FOLDER", "= list(specific_date_df[\"county\"]) # spec_state = list(specific_date_df[\"state\"]) # pop_county = list(pop_counties[\"Geographic Area\"]) # pop_state", "in zip(spec_county, spec_state): # boo = True # for p_county, p_state in zip(pop_county,", "plotly.express as px import plotly from plotly.offline import plot import os import math", "# 3065 # print(population_counties_list) specific_date_df[\"county_population\"] = population_counties_list per100k = [] for pop, count", "response: # counties = json.load(response) # # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # # print((counties[\"features\"][0])) #3221 #", "# 6/29/2020 #%% def line_prepender(filename, line): with open(filename, 'r+') as f: content =", "'population_counties_2019.xlsx') # pop_counties = pd.read_excel(open(my_file, 'rb'), index_col=None, sep='\\t') # # print(pop_counties) # #", "row[\"deaths\"] / row[\"cases\"] IFR_list.append(IFR) else: IFR_list.append(0) specific_date_df[\"IFR\"] = IFR_list # print(specific_date_df) specific_date_df =", "= specific_date_df.copy() IFR_list = [] for index, row in specific_date_df.iterrows(): if row[\"cases\"] >", "my_file = os.path.join(THIS_FOLDER, 'population_states_2019.txt') pop_states = pd.read_csv(my_file, header=0) # print(pop_states[\"State\"]) # print(states_only_df) pop_list", "# print(pop_counties) # # 
for value in pop_counties[\"Geographic Area\"]: # # if \"District", "1: # per10k.append(1) # else: # per10k.append(10000 * (count/pop)) # specific_date_df[\"per10k\"] = per10k", ") fig.update_layout(margin={\"r\":5,\"t\":20,\"l\":5,\"b\":5}, title_text = '<br><br>Covid-19 Total Cases Per 100k Population Per<br>County Using 2019", "row in pop_counties.iterrows(): # for state in fips_states_keys: # if state in row[\"Geographic", "6 # specific_date_df = specific_date_df.copy() IFR_list = [] for index, row in specific_date_df.iterrows():", "open(filename, 'r+') as f: content = f.read() f.seek(0, 0) f.write(line.rstrip('\\r\\n') + '\\n' +", "state\" states_only_df = pd.DataFrame() list_state_count = [] list_str_states = list(specific_date_df[\"state\"].unique()) # print(list_str_states) for", "Area\"] = counties_list # # print(pop_counties) # # for index, row in pop_counties.iterrows():", "per state fig = px.choropleth(states_only_df, geojson=states_mapping, locations='state_id', color='per100k', color_continuous_scale=\"Viridis\", # range_color=(0, 10), #", "state in row[\"Geographic Area\"]: # if row[\"Geographic Area\"].find(state) > 1: # one_state =", "for state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state'] = specific_date_df['state'].replace(state, state_id) # print(specific_date_df) specific_date_df[\"state_name\"]", "# # for picking out a specific state, in this case Georgia #", "- 1] # 6/29/2020 else: current_date = df[\"date\"][when] return df, current_date def make_df_for_date(input_date,", "fig.update_layout(margin={\"r\":5,\"t\":5,\"l\":5,\"b\":5}, # title_text = '<br><br>Covid-19 Spread Per 100k Population Per County<br>Using 2019 Census", "showing data per county\" my_file = os.path.join(THIS_FOLDER, 'all_census_data.csv') pop_counties = pd.read_csv(open(my_file)) # print(pop_counties)", "list(specific_date_df[\"state\"].unique()) # print(list_str_states) for id_ in list_str_states: total = 0 for index, row", "# # if 
row[\"Geographic Area\"] == \"District of Columbia\": # # # print(\"sure\")", "to remove data from census that is missing from covid # copy_df =", "dfmain[\"date\"][dfmain.shape[0] - 1] # 6/29/2020, or yesterday # current_date = df[\"date\"][10] # 6/29/2020", "# for index, row in pop_counties.iterrows(): # for state in fips_states_keys: # if", "json.load(response) # # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # # print((counties[\"features\"][0])) #3221 # # per county #", "return specific_date_df #%% def states_heat_map(specific_date_df): \"for showing data per state\" states_only_df = pd.DataFrame()", "print(state_id[600]) for c,s in zip(county_id, state_id): fips_county_ids.append(s + c) # print(fips_county_ids[1]) # print(len(county_id))", "# pop_counties[\"Geographic Area\"] = counties_list # # print(pop_counties) # # for index, row", "as response: counties = json.load(response) # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # print((counties[\"features\"][0])) #3221 # per county", "# # print(specific_date_df) # # print(per10k) # # import math # log10_per10k =", "images.append(f) # print(len(images)) # images[0].save('covid_timeline_county_cases.gif', # save_all=True, append_images=images[1:], optimize=False, duration=500, loop=0) #%% #Graveyard", "# winner # color_continuous_scale=\"Viridis\", # color_continuous_scale=\"hot\", # color_continuous_scale=\"ice\", # color_continuous_scale=\"thermal\", color_continuous_scale=[[0.0,'rgb(0,0,200)'], [0.3, 'rgb(149,207,216)'],", "import os # images = [] # directory = 'C:/Users/karas/.spyder-py3/coronavirus/images_counties' # for filename", "these are in the data from kaggle but not in the geojson specific_date_df", "log10_per100k.append(math.log10(item)) # specific_date_df[\"log10_per100k\"] = log10_per100k # copy_df = specific_date_df.copy() # this is to", "zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1: per100k.append(1) else: 
per100k.append(100000 * (count/pop)) specific_date_df[\"cases_per100k\"] =", "for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop == 1: # per100k.append(1)", "= counties_heat_map(specific_date_df, new_date) # states_heat_map(specific_date_df): # fig.write_image(\"images_counties/\"+new_date+\"_county_per100k.png\") fig.write_image(\"C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/pages/static/current_counties.png\") html_header = \"\"\" {% extends", "# # import math # log10_per10k = [] # for item in per10k:", "pd.DataFrame(data=None, columns=list(specific_date_df.columns.values)) # print(specific_date_df) # print(specific_date_Georgia_df) # # for picking out a specific", "# for index, row in pop_counties.iterrows(): # # if row[\"state\"] in row[\"Geographic Area\"]:", "else: # per10k.append(10000 * (count/pop)) # specific_date_df[\"per10k\"] = per10k # # print(specific_date_df) #", "!= \"Puerto Rico\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Guam\"] for state, state_id in zip(fips_states_keys,", "# counter = 0 # for s_county, s_state in zip(spec_county, spec_state): # boo", "# print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # # print((counties[\"features\"][0])) #3221 # # per county # fig =", "in fips_states_keys: # if state in row[\"Geographic Area\"]: # if row[\"Geographic Area\"].find(state) >", "county geojson # with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: # counties = json.load(response) # #", "in per10k: # # print(item) # log10_per10k.append(math.log10(item)) # specific_date_df[\"log10_per10k\"] = log10_per10k # #", "locationmode = 'USA-states', # featureidkey = \"id\", # hover_name = \"county\", # scope=\"usa\",", "IFR = row[\"deaths\"] / row[\"cases\"] IFR_list.append(IFR) else: IFR_list.append(0) specific_date_df[\"IFR\"] = IFR_list # print(specific_date_df)", "= [w.replace('ogia', 
'orgia') for w in fips_states_keys] THIS_FOLDER = os.path.dirname(os.path.abspath(__file__)) dfmain = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv',", "fips_states_keys = [w.replace('ogia', 'orgia') for w in fips_states_keys] # these are in the", "\"\"\" from urllib.request import urlopen import json import pandas as pd import plotly.express", "states_only_df[\"state_pop\"] = pop_list # print(pop_list) # print(len(pop_list)) per100k = [] for pop, count", "df) fig = counties_heat_map(specific_date_df, new_date) # states_heat_map(specific_date_df): # fig.write_image(\"images_counties/\"+new_date+\"_county_per100k.png\") fig.write_image(\"C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/pages/static/current_counties.png\") html_header = \"\"\"", "[] for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1: per100k.append(1) else:", "in zip(pop_county, pop_state): # if s_county == p_county and s_state == p_state: #", "n, c_id in enumerate(county_id): if c_id < 10: county_id[n] = \"00\"+str(c_id) elif c_id", "row in df.iterrows(): if row[\"date\"] == input_date: specific_date_df.loc[df.index[index]] = df.iloc[index] # print(specific_date_df) #", "f.write(line.rstrip('\\r\\n') + '\\n' + content) f.write('\\n{% endblock %}') #%% def load_data(when = 0,", "# # print(index) # # print(copy_new_df.index[index]) # specific_date_Georgia_df.loc[index_counter] = specific_date_df.iloc[index] # index_counter +=", "100: county_id[n] = \"0\"+str(c_id) else: county_id[n] = str(c_id) for n, s_id in enumerate(state_id):", "# specific_date_Georgia_df.loc[index_counter] = specific_date_df.iloc[index] # index_counter += 1 # # print(index_counter) # print(specific_date_Georgia_df)", "= f.save(filename) # images.append(f) # print(len(images)) # images[0].save('covid_timeline_county_cases.gif', # save_all=True, append_images=images[1:], optimize=False, duration=500,", "the geojson 
specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Northern Mariana Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] !=", "print(dfmain[\"date\"][1]) # current_date = dfmain[\"date\"][dfmain.shape[0] - 1] # 6/29/2020, or yesterday # current_date", "x 6 columns] dfmain.shape[0] old_date = '' for i in range(dfmain.shape[0]): new_date =", "True: population_counties_list.append(1) odd_balls.append(spec_fips) # unknown county cases # print(spec_fips) # print(len(population_counties_list)) # 3065", "= [w.replace('ogia', 'orgia') for w in fips_states_keys] # these are in the data", "plot(fig) return fig #%% def counties_heat_map(specific_date_df, date): \"for showing data per county\" my_file", "= os.path.join(THIS_FOLDER, 'population_states_2019.txt') pop_states = pd.read_csv(my_file, header=0) # print(pop_states[\"State\"]) # print(states_only_df) pop_list =", "= pop_counties[\"Geographic Area\"].map(lambda x: x.lstrip('. ,').rstrip('aAbBcC')) # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('.', '')", "with open('gz_2010_us_040_00_20m.json') as response: states_mapping = json.load(response) print(states_mapping[\"features\"][0][\"properties\"][\"STATE\"]) print(len(states_mapping[\"features\"])) #3221 # per state", "= pop_counties[\"Geographic Area\"].str.replace('.', '') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(',', '') # #", "# current_date = df[\"date\"][10] # 6/29/2020 #%% def line_prepender(filename, line): with open(filename, 'r+')", "= [] for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1: per10k.append(1)", "!= 0] # Per county geojson with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties = json.load(response)", "fips_states_keys: # if state in row[\"Geographic Area\"]: # if row[\"Geographic Area\"].find(state) > 1:", "n, 
s_id in enumerate(state_id): if s_id < 10: state_id[n] = \"0\"+str(s_id) else: state_id[n]", "yeah\") # # row[\"Geographic Area\"].replace(row[\"state\"], '') # # break # # print(len(counties_list)) #", "fig = px.choropleth(states_only_df, geojson=states_mapping, locations='state_id', color='per100k', color_continuous_scale=\"Viridis\", # range_color=(0, 10), # locationmode =", "Columbia') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' County', '') # # pop_counties[\"Geographic Area\"]", "state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state_name'] = specific_date_df['state_name'].replace(state_id, state) county_and_state = [] for", "= log10_per100k copy_df = specific_date_df.copy() # this is to remove data from census", "f: content = f.read() f.seek(0, 0) f.write(line.rstrip('\\r\\n') + '\\n' + content) f.write('\\n{% endblock", "# # print(one_state) # one_state = \"District of Columbia\" # if one_state in", "\"+ row[\"state_name\"] county_and_state.append(c_and_s) specific_date_df[\"county_and_state\"] = county_and_state return specific_date_df #%% def states_heat_map(specific_date_df): \"for showing", "!= \"Guam\"] for state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state'] = specific_date_df['state'].replace(state, state_id) #", "# if row[\"state\"] == state: # # print(\"yes\") # # print(index) # #", "specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('Parish', '') # DistrictOfColumbia", "count in zip(states_only_df[\"state_pop\"], states_only_df[\"per_state_count\"]): per100k.append(100000 * (count/pop)) states_only_df[\"per100k\"] = per100k print(states_only_df) with open('gz_2010_us_040_00_20m.json')", "county_and_state.append(c_and_s) specific_date_df[\"county_and_state\"] = county_and_state return specific_date_df #%% def 
states_heat_map(specific_date_df): \"for showing data per", "os.path.dirname(os.path.abspath(__file__)) dfmain = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) # print(dfmain.head) # print(dfmain.shape) # print(dfmain[\"date\"][dfmain.shape[0] -", "elif c_id < 100: county_id[n] = \"0\"+str(c_id) else: county_id[n] = str(c_id) for n,", "for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1: per10k.append(1) else: per10k.append(10000", "# print(per100k) per10k = [] for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop", "'' if yesterday: current_date = df[\"date\"][df.shape[0] - 1] # 6/29/2020 else: current_date =", "specific_date_df.iterrows(): c_and_s = row[\"county\"] +\", \"+ row[\"state_name\"] county_and_state.append(c_and_s) specific_date_df[\"county_and_state\"] = county_and_state return specific_date_df", "of Columbia\" # if one_state in row[\"Geographic Area\"]: # states_col_for_county_pop.append(one_state) # # print(len(states_col_for_county_pop))", "ImageDraw # import PIL # import os # images = [] # directory", "# population_per_county = list(pop_counties[2019]) # population_counties_list = [] # # counter = 0", "specific_date_Georgia_df.loc[index_counter] = specific_date_df.iloc[index] # index_counter += 1 # # print(index_counter) # print(specific_date_Georgia_df) #", "True # for p_county, p_state in zip(pop_county, pop_state): # if s_county == p_county", "line): with open(filename, 'r+') as f: content = f.read() f.seek(0, 0) f.write(line.rstrip('\\r\\n') +", "def make_df_for_date(input_date, df): specific_date_df = pd.DataFrame(data=None, columns=list(df.columns.values)) for index, row in df.iterrows(): if", "Area\"].str.replace(' County', '') # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '') #", "< 100: county_id[n] = \"0\"+str(c_id) else: 
county_id[n] = str(c_id) for n, s_id in", "6 columns] dfmain.shape[0] old_date = '' for i in range(dfmain.shape[0]): new_date = dfmain[\"date\"][i]", "# if yesterday = True # new_date = '2020-06-30' print(\"Date: \", new_date) #", "showing data per county\" # my_file = os.path.join(THIS_FOLDER, 'population_counties_2019.xlsx') # pop_counties = pd.read_excel(open(my_file,", "for fips_census in fips_county_ids: if spec_fips == fips_census: population_counties_list.append(population_per_county[fips_county_ids.index(fips_census)]) boo = False #", "# print(pop_list) # print(len(pop_list)) per100k = [] for pop, count in zip(states_only_df[\"state_pop\"], states_only_df[\"per_state_count\"]):", "response: states_mapping = json.load(response) print(states_mapping[\"features\"][0][\"properties\"][\"STATE\"]) print(len(states_mapping[\"features\"])) #3221 # per state fig = px.choropleth(states_only_df,", "in enumerate(county_id): if c_id < 10: county_id[n] = \"00\"+str(c_id) elif c_id < 100:", "population_counties_list per100k = [] for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop ==", "# break # # print(len(counties_list)) # # print((counties_list)) # pop_counties[\"Geographic Area\"] = counties_list", "print(per10k) # import math log10_per10k = [] for item in per10k: # print(item)", "0] # # Per county geojson # with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: # counties", "enumerate(state_id): if s_id < 10: state_id[n] = \"0\"+str(s_id) else: state_id[n] = str(s_id) #", "hover_data = [\"county_population\", \"cases\", \"cases_per100k\", \"cases_per_log10_per100k\", \"deaths\", \"IFR\"], scope=\"usa\", labels = {'cases_per_log10_per100k': 'log(cases/100k)'}", "= list(pop_counties[\"Geographic Area\"]) # pop_state = list(pop_counties[\"state\"]) # population_per_county = list(pop_counties[2019]) # population_counties_list", "row[\"state\"] == state: # # 
print(\"yes\") # # print(index) # # print(copy_new_df.index[index]) #", "row[\"Geographic Area\"]: # # # print(\"oh yeah\") # # row[\"Geographic Area\"].replace(row[\"state\"], '') #", "geojson=counties, # locations='fips', # color='log10_per100k', # # color_continuous_scale=\"Reds\", # color_continuous_scale=\"Viridis\", # range_color=(0, 5),", "[] # counter = 0 for spec_fips in spec_fips: boo = True for", "# print(\"oh yeah\") # # row[\"Geographic Area\"].replace(row[\"state\"], '') # # break # #", "p_county, p_state in zip(pop_county, pop_state): # if s_county == p_county and s_state ==", "= copy_df[copy_df['cases_per_log10_per100k'] != 0] # Per county geojson with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties", "state # # if one_state == \"Distric of Columbia\": # # print(\"huzzah\") #", "= specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('Parish', '') #", "'') # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('District of Columbia District of Columbia',", "print(per10k) # # import math # log10_per10k = [] # for item in", "specific_date_df[\"cases\"]): if pop == 1: per10k.append(1) else: per10k.append(10000 * (count/pop)) specific_date_df[\"per10k\"] = per10k", "'') # DistrictOfColumbia # spec_county = list(specific_date_df[\"county\"]) # spec_state = list(specific_date_df[\"state\"]) # pop_county", "[] # for index, row in pop_counties.iterrows(): # for state in fips_states_keys: #", "states_only_df[\"state_id\"] = list_str_states states_only_df[\"state_name\"] = fips_states_keys print(states_only_df) my_file = os.path.join(THIS_FOLDER, 'population_states_2019.txt') pop_states =", "counties_list.append(counties) # # for index, row in pop_counties.iterrows(): # # if row[\"state\"] in", "pd import plotly.express as px import plotly from plotly.offline 
import plot import os", "0 # for s_county, s_state in zip(spec_county, spec_state): # boo = True #", "# from PIL import Image, ImageDraw # import PIL # import os #", "print(len(states_col_for_county_pop)) # # print(states_col_for_county_pop) # pop_counties[\"state\"] = states_col_for_county_pop # # print(pop_counties) # counties_list", "THIS_FOLDER = os.path.dirname(os.path.abspath(__file__)) dfmain = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) # print(dfmain.head) # print(dfmain.shape) #", "in zip(county_id, state_id): fips_county_ids.append(s + c) # print(fips_county_ids[1]) # print(len(county_id)) # print(len(state_id)) #", "if row[\"date\"] == input_date: specific_date_df.loc[df.index[index]] = df.iloc[index] # print(specific_date_df) # has all data", "'rgb(234,252,258)'], [0.6, 'rgb(255,210,0)'], [1.0, 'rgb(200,0,0)']], range_color=(0, 5), # locationmode = 'USA-states', featureidkey =", "population_per_county = list(pop_counties[2019]) # population_counties_list = [] # # counter = 0 #", "# df, current_date = load_data(when = i, yesterday=False) df, current_date = load_data(when =", "Per county geojson # with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: # counties = json.load(response) #", "# print(state_id[600]) for c,s in zip(county_id, state_id): fips_county_ids.append(s + c) # print(fips_county_ids[1]) #", "px.choropleth(copy_df, geojson=counties, locations='fips', color='cases_per_log10_per100k', # color_continuous_scale=\"icefire\", # winner # color_continuous_scale=\"Viridis\", # color_continuous_scale=\"hot\", #", "# print(\"trouble maker\") # # print(counties) # counties = \"District of Columbia\" #", "print(pop_counties) # # for index, row in pop_counties.iterrows(): # # if row[\"Geographic Area\"]", "# print(specific_date_df) # print(per10k) # import math log10_per10k = [] for item in", 
"list_state_count.append(total) # break print(list_state_count) print(len(list_state_count)) states_only_df[\"per_state_count\"] = list_state_count states_only_df[\"state_id\"] = list_str_states states_only_df[\"state_name\"] =", "# counties = json.load(response) # # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # # print((counties[\"features\"][0])) #3221 # #", "else: state_id[n] = str(s_id) # print(county_id[57]) # print(state_id[600]) for c,s in zip(county_id, state_id):", "specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('Parish', '')", "geojson=counties, locations='fips', color='cases_per_log10_per100k', # color_continuous_scale=\"icefire\", # winner # color_continuous_scale=\"Viridis\", # color_continuous_scale=\"hot\", # color_continuous_scale=\"ice\",", "= [] # counter = 0 for spec_fips in spec_fips: boo = True", "locations='fips', color='cases_per_log10_per100k', # color_continuous_scale=\"icefire\", # winner # color_continuous_scale=\"Viridis\", # color_continuous_scale=\"hot\", # color_continuous_scale=\"ice\", #", "print(\"trouble maker\") # # print(counties) # counties = \"District of Columbia\" # counties_list.append(counties)", "from kaggle but not in the geojson specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Northern Mariana", "row[\"Geographic Area\"].find(state) > 1: # one_state = state # # if one_state ==", "pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('District of Columbia District of Columbia', 'District of Columbia')", "# if state in row[\"Geographic Area\"]: # if row[\"Geographic Area\"].find(state) > 1: #", "Census Estimations<br>'+date # ) # # fig.show() # # plot(fig) # return fig", "print(len(county_id)) # print(len(state_id)) # print(len(fips_county_ids)) specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', 
'') # DistrictOfColumbia spec_fips =", "Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Virgin Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Puerto Rico\"]", ") # fig.show() # plot(fig,filename='covid_counties_'+date+'.html') plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html') # plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/'+date+'_counties.html') # plot(fig) return fig #%%", "for i in range(dfmain.shape[0]): new_date = dfmain[\"date\"][i] if i%50 == 0 and new_date", "= [] # directory = 'C:/Users/karas/.spyder-py3/coronavirus/images_counties' # for filename in os.listdir(directory): # #", "print(len(list_state_count)) states_only_df[\"per_state_count\"] = list_state_count states_only_df[\"state_id\"] = list_str_states states_only_df[\"state_name\"] = fips_states_keys print(states_only_df) my_file =", "# import os # images = [] # directory = 'C:/Users/karas/.spyder-py3/coronavirus/images_counties' # for", "# print(id_) total += row[\"cases\"] list_state_count.append(total) # break print(list_state_count) print(len(list_state_count)) states_only_df[\"per_state_count\"] = list_state_count", "for index, row in specific_date_df.iterrows(): if row[\"cases\"] > 0: IFR = row[\"deaths\"] /", "and new_date != old_date: old_date = new_date new_date = dfmain[\"date\"][dfmain.shape[0] - 1] #", "# print((counties_list)) # pop_counties[\"Geographic Area\"] = counties_list # # print(pop_counties) # # for", "df): specific_date_df = pd.DataFrame(data=None, columns=list(df.columns.values)) for index, row in df.iterrows(): if row[\"date\"] ==", "+\", \"+ row[\"state_name\"] county_and_state.append(c_and_s) specific_date_df[\"county_and_state\"] = county_and_state return specific_date_df #%% def states_heat_map(specific_date_df): \"for", "\"Georgia\" # index_counter = 0 # for index, row in 
specific_date_df.iterrows(): # #", "f.write('\\n{% endblock %}') #%% def load_data(when = 0, yesterday=True): df = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\":", "# print(\"huzzah\") # if state == \"District of Columbia\": # # print(\"aye\") #", "> 1: # one_state = state # # if one_state == \"Distric of", "state_id in zip(fips_states_keys, fips_states_values): # pop_counties[\"state\"] = pop_counties[\"state\"].replace(state, state_id) # specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.',", "* (count/pop)) states_only_df[\"per100k\"] = per100k print(states_only_df) with open('gz_2010_us_040_00_20m.json') as response: states_mapping = json.load(response)", "= 'USA-states', # featureidkey = \"id\", # hover_name = \"county\", # scope=\"usa\", #", "county_id[n] = \"0\"+str(c_id) else: county_id[n] = str(c_id) for n, s_id in enumerate(state_id): if", "= [] for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1: per100k.append(1)", "#%% def main(): #[282519 rows x 6 columns] dfmain.shape[0] old_date = '' for", "import Image, ImageDraw # import PIL # import os # images = []", "print(specific_date_df) # per100k = [] # for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): #", "%} {% block content %} <body style=\"background-color:black;color:white;\"> \"\"\" line_prepender('C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html', html_header) break #%% if", "if state == \"District of Columbia\": # # print(\"aye\") # # print(one_state) #", "= 0, yesterday=True): df = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) current_date = '' if yesterday:", "1: # one_state = state # # if one_state == \"Distric of Columbia\":", "Area\"]: # # # print(\"oh yeah\") # # row[\"Geographic 
Area\"].replace(row[\"state\"], '') # #", "# per10k = [] # for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if", "html_header = \"\"\" {% extends 'base.html' %} {% block content %} <body style=\"background-color:black;color:white;\">", "for state, state_id in zip(fips_states_keys, fips_states_values): # pop_counties[\"state\"] = pop_counties[\"state\"].replace(state, state_id) # specific_date_df[\"county\"]", "specific_date_df.copy() IFR_list = [] for index, row in specific_date_df.iterrows(): if row[\"cases\"] > 0:", "# # for value in pop_counties[\"Geographic Area\"]: # # if \"District \" in", "df[\"date\"][when] return df, current_date def make_df_for_date(input_date, df): specific_date_df = pd.DataFrame(data=None, columns=list(df.columns.values)) for index,", "list(pop_counties[\"COUNTY\"]) state_id = list(pop_counties[\"STATE\"]) population_per_county = list(pop_counties[\"POPESTIMATE2019\"]) fips_county_ids = [] for n, c_id", "# # print(value) # # for item in pop_counties[\"Geographic Area\"]: # # if", "spec_fips = list(specific_date_df[\"fips\"]) odd_balls = [] # unknown county cases population_counties_list = []", "# save_all=True, append_images=images[1:], optimize=False, duration=500, loop=0) #%% #Graveyard # def counties_heat_map(specific_date_df, date): #", "{\"size\": 14, \"color\":\"White\"}, autosize = False, width = 800, height = 650 )", "os.path.join(THIS_FOLDER, 'population_states_2019.txt') pop_states = pd.read_csv(my_file, header=0) # print(pop_states[\"State\"]) # print(states_only_df) pop_list = []", "for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop == 1: # per10k.append(1)", "current_date = df[\"date\"][10] # 6/29/2020 #%% def line_prepender(filename, line): with open(filename, 'r+') as", "list_str_states states_only_df[\"state_name\"] = fips_states_keys print(states_only_df) my_file = os.path.join(THIS_FOLDER, 'population_states_2019.txt') 
pop_states = pd.read_csv(my_file, header=0)", "x: x.lstrip('. ,').rstrip('aAbBcC')) # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('.', '') # pop_counties[\"Geographic Area\"]", "of Columbia') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' County', '') # # pop_counties[\"Geographic", "= state # # if one_state == \"Distric of Columbia\": # # print(\"huzzah\")", "5), # locationmode = 'USA-states', featureidkey = \"id\", hover_name = \"county_and_state\", hover_data =", "'' # for state in fips_states_keys: # if state in row[\"Geographic Area\"]: #", "in row[\"Geographic Area\"]: # if row[\"Geographic Area\"].find(state) > 1: # one_state = state", "# print(specific_date_df) # # print(per100k) # per10k = [] # for pop, count", "states_col_for_county_pop = [] # for index, row in pop_counties.iterrows(): # one_state = ''", "specific_date_df[\"state\"] for state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state_name'] = specific_date_df['state_name'].replace(state_id, state) county_and_state =", "columns=list(df.columns.values)) for index, row in df.iterrows(): if row[\"date\"] == input_date: specific_date_df.loc[df.index[index]] = df.iloc[index]", "# if row[\"state\"] in row[\"Geographic Area\"]: # # # print(\"oh yeah\") # #", "f = Image.open('C:/Users/karas/.spyder-py3/coronavirus/images_counties/'+filename) # # f = f.save(filename) # images.append(f) # print(len(images)) #", "if row[\"state\"] == id_: # print(id_) total += row[\"cases\"] list_state_count.append(total) # break print(list_state_count)", "if row[\"State\"] == state: pop_list.append(row[\"Population\"]) states_only_df[\"state_pop\"] = pop_list # print(pop_list) # print(len(pop_list)) per100k", "# print(\"yes\") # # print(index) # # print(copy_new_df.index[index]) # specific_date_Georgia_df.loc[index_counter] = specific_date_df.iloc[index] #", "# print(per10k) # import math log10_per10k = [] for item in 
per10k: #", "load_data(when = i, yesterday=True) # current_date = new_date specific_date_df = make_df_for_date(input_date = current_date,", "Area\"].replace(state, '') # if state == \"District of Columbia\": # # print(\"trouble maker\")", "yesterday=True): df = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) current_date = '' if yesterday: current_date =", "# # row[\"Geographic Area\"].replace(row[\"state\"], '') # # break # # print(len(counties_list)) # #", "'<br><br>Covid-19 Total Cases Per 100k Population Per<br>County Using 2019 Census Estimations<br>'+date, titlefont =", "pop == 1: per10k.append(1) else: per10k.append(10000 * (count/pop)) specific_date_df[\"per10k\"] = per10k # print(specific_date_df)", "new_date = '2020-06-30' print(\"Date: \", new_date) # df, current_date = load_data(when = i,", "c_and_s = row[\"county\"] +\", \"+ row[\"state_name\"] county_and_state.append(c_and_s) specific_date_df[\"county_and_state\"] = county_and_state return specific_date_df #%%", "in list_str_states: total = 0 for index, row in specific_date_df.iterrows(): if row[\"state\"] ==", "# plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/'+date+'_counties.html') # plot(fig) return fig #%% def main(): #[282519 rows x 6", "Area\"].str.replace(' ', '') # DistrictOfColumbia # spec_county = list(specific_date_df[\"county\"]) # spec_state = list(specific_date_df[\"state\"])", "df.iloc[index] # print(specific_date_df) # has all data for current date # 3067 x", "= 'USA-states', featureidkey = \"properties.STATE\", hover_name = \"state_name\", scope=\"usa\", labels={'per100k':'cases per 100k'} )", "Columbia\" # counties_list.append(counties) # # for index, row in pop_counties.iterrows(): # # if", "# per10k.append(1) # else: # per10k.append(10000 * (count/pop)) # specific_date_df[\"per10k\"] = per10k #", "= load_data(when = i, yesterday=False) df, current_date = load_data(when = 
i, yesterday=True) #", "print(pop_counties) county_id = list(pop_counties[\"COUNTY\"]) state_id = list(pop_counties[\"STATE\"]) population_per_county = list(pop_counties[\"POPESTIMATE2019\"]) fips_county_ids = []", "for index, row in specific_date_df.iterrows(): c_and_s = row[\"county\"] +\", \"+ row[\"state_name\"] county_and_state.append(c_and_s) specific_date_df[\"county_and_state\"]", "# color_continuous_scale=\"hot\", # color_continuous_scale=\"ice\", # color_continuous_scale=\"thermal\", color_continuous_scale=[[0.0,'rgb(0,0,200)'], [0.3, 'rgb(149,207,216)'], [0.5, 'rgb(234,252,258)'], [0.6, 'rgb(255,210,0)'],", "[] # # counter = 0 # for s_county, s_state in zip(spec_county, spec_state):", "# copy_df = specific_date_df.copy() # this is to remove data from census that", "specific_date_Georgia_df = pd.DataFrame(data=None, columns=list(specific_date_df.columns.values)) # print(specific_date_df) # print(specific_date_Georgia_df) # # for picking out", "+ c) # print(fips_county_ids[1]) # print(len(county_id)) # print(len(state_id)) # print(len(fips_county_ids)) specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.',", "# print((counties[\"features\"][0])) #3221 # per county fig = px.choropleth(copy_df, geojson=counties, locations='fips', color='cases_per_log10_per100k', #", "Area\"]: # # if \"Virginia\" in item: # # print(item) # # print(pop_counties.shape)", "extends 'base.html' %} {% block content %} <body style=\"background-color:black;color:white;\"> \"\"\" line_prepender('C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html', html_header) break", "f.read() f.seek(0, 0) f.write(line.rstrip('\\r\\n') + '\\n' + content) f.write('\\n{% endblock %}') #%% def", "!= \"Virgin Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Puerto Rico\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] !=", "# print(per10k) # # import math # log10_per10k = [] # for item", 
"response: fips_states = json.load(response) # print(fips_states) fips_states_keys = list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys", "state in states_only_df[\"state_name\"]: for i,row in pop_states.iterrows(): if row[\"State\"] == state: pop_list.append(row[\"Population\"]) states_only_df[\"state_pop\"]", "# # print((counties_list)) # pop_counties[\"Geographic Area\"] = counties_list # # print(pop_counties) # #", "# range_color=(0, 10), # locationmode = 'USA-states', featureidkey = \"properties.STATE\", hover_name = \"state_name\",", "= '' if yesterday: current_date = df[\"date\"][df.shape[0] - 1] # 6/29/2020 else: current_date", "# print(\"aye\") # # print(one_state) # one_state = \"District of Columbia\" # if", "# # print(specific_date_df) # per100k = [] # for pop, count in zip(specific_date_df[\"county_population\"],", "# # print(len(population_counties_list)) # 3065 # # print(population_counties_list) # specific_date_df[\"county_population\"] = population_counties_list #", "index, row in specific_date_df.iterrows(): c_and_s = row[\"county\"] +\", \"+ row[\"state_name\"] county_and_state.append(c_and_s) specific_date_df[\"county_and_state\"] =", "if c_id < 10: county_id[n] = \"00\"+str(c_id) elif c_id < 100: county_id[n] =", "= [] # for item in per10k: # # print(item) # log10_per10k.append(math.log10(item)) #", "per county\" my_file = os.path.join(THIS_FOLDER, 'all_census_data.csv') pop_counties = pd.read_csv(open(my_file)) # print(pop_counties) county_id =", "# per10k.append(10000 * (count/pop)) # specific_date_df[\"per10k\"] = per10k # # print(specific_date_df) # #", "# unknown county cases population_counties_list = [] # counter = 0 for spec_fips", "# print(len(pop_list)) per100k = [] for pop, count in zip(states_only_df[\"state_pop\"], states_only_df[\"per_state_count\"]): per100k.append(100000 *", "# print(index_counter) # print(specific_date_Georgia_df) # has all data for current date with 
urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json')", "unknown county cases population_counties_list = [] # counter = 0 for spec_fips in", "on Mon Jun 29 15:54:28 2020 https://plotly.com/python/county-choropleth/?fbclid=IwAR1xOTSniBA_d1okZ-xEOa8eEeapK8AFTgWILshAnEvfLgJQPAhHgsVCIBE https://www.kaggle.com/fireballbyedimyrnmom/us-counties-covid-19-dataset better census data https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/ \"\"\"", "# population_counties_list.append(population_per_county[pop_county.index(p_county)]) # boo = False # # counter += 1 # if", "# # print(copy_new_df.index[index]) # specific_date_Georgia_df.loc[index_counter] = specific_date_df.iloc[index] # index_counter += 1 # #", "# # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('District of Columbia District of Columbia', 'District", "data per county\" my_file = os.path.join(THIS_FOLDER, 'all_census_data.csv') pop_counties = pd.read_csv(open(my_file)) # print(pop_counties) county_id", "else: IFR_list.append(0) specific_date_df[\"IFR\"] = IFR_list # print(specific_date_df) specific_date_df = specific_date_df.reset_index(drop=True) # specific_date_Georgia_df =", "p_state: # population_counties_list.append(population_per_county[pop_county.index(p_county)]) # boo = False # # counter += 1 #", "missing from covid # copy_df = copy_df[copy_df['log10_per100k'] != 0] # # Per county", "return fig #%% def main(): #[282519 rows x 6 columns] dfmain.shape[0] old_date =", "= \"id\", # hover_name = \"county\", # scope=\"usa\", # ) # fig.update_layout(margin={\"r\":5,\"t\":5,\"l\":5,\"b\":5}, #", "state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state'] = specific_date_df['state'].replace(state, state_id) # print(specific_date_df) specific_date_df[\"state_name\"] =", "{% extends 'base.html' %} {% block content %} <body 
style=\"background-color:black;color:white;\"> \"\"\" line_prepender('C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html', html_header)", "census that is missing from covid # copy_df = copy_df[copy_df['log10_per100k'] != 0] #", "# print(states_col_for_county_pop) # pop_counties[\"state\"] = states_col_for_county_pop # # print(pop_counties) # counties_list = []", "1: # per100k.append(1) # else: # per100k.append(100000 * (count/pop)) # specific_date_df[\"per100k\"] = per100k", "not os.path.exists(\"images_counties\"): os.mkdir(\"images_counties\") with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response) fips_states_keys = list(fips_states.keys())", "# for state in fips_states_keys: # if state in row[\"Geographic Area\"]: # if", "= 0 # for s_county, s_state in zip(spec_county, spec_state): # boo = True", "print(per100k) # per10k = [] # for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): #", "# unknown county cases # print(spec_fips) # print(len(population_counties_list)) # 3065 # print(population_counties_list) specific_date_df[\"county_population\"]", "if row[\"Geographic Area\"].find(state) > 1: # one_state = state # # if one_state", "# print(population_counties_list) specific_date_df[\"county_population\"] = population_counties_list per100k = [] for pop, count in zip(specific_date_df[\"county_population\"],", "total += row[\"cases\"] list_state_count.append(total) # break print(list_state_count) print(len(list_state_count)) states_only_df[\"per_state_count\"] = list_state_count states_only_df[\"state_id\"] =", "= dfmain[\"date\"][dfmain.shape[0] - 1] # if yesterday = True # new_date = '2020-06-30'", "range_color=(0, 5), # # locationmode = 'USA-states', # featureidkey = \"id\", # hover_name", 
"https://plotly.com/python/county-choropleth/?fbclid=IwAR1xOTSniBA_d1okZ-xEOa8eEeapK8AFTgWILshAnEvfLgJQPAhHgsVCIBE https://www.kaggle.com/fireballbyedimyrnmom/us-counties-covid-19-dataset better census data https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/ \"\"\" from urllib.request import urlopen import json", "# pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' County', '') # # pop_counties[\"Geographic Area\"] =", "0: IFR = row[\"deaths\"] / row[\"cases\"] IFR_list.append(IFR) else: IFR_list.append(0) specific_date_df[\"IFR\"] = IFR_list #", "Census Estimations<br>'+date, titlefont = {\"size\": 15, \"color\":\"White\"}, paper_bgcolor='#4E5D6C', plot_bgcolor='#4E5D6C', geo=dict(bgcolor= 'rgba(0,0,0,0)', lakecolor='#4E5D6C'), font", "print(states_mapping[\"features\"][0][\"properties\"][\"STATE\"]) print(len(states_mapping[\"features\"])) #3221 # per state fig = px.choropleth(states_only_df, geojson=states_mapping, locations='state_id', color='per100k', color_continuous_scale=\"Viridis\",", "6/29/2020, or yesterday # current_date = df[\"date\"][10] # 6/29/2020 #%% def line_prepender(filename, line):", "# print(pop_counties) # # for index, row in pop_counties.iterrows(): # # if row[\"Geographic", "= specific_date_df.copy() # this is to remove data from census that is missing", "boo = True # for p_county, p_state in zip(pop_county, pop_state): # if s_county", "# # print(pop_counties) # # for index, row in pop_counties.iterrows(): # # if", "index_col=None, sep='\\t') # # print(pop_counties) # # print(pop_counties[\"Geographic Area\"]) # # pop_counties[\"Geographic Area\"]", "yesterday: current_date = df[\"date\"][df.shape[0] - 1] # 6/29/2020 else: current_date = df[\"date\"][when] return", "# print(specific_date_df) specific_date_df[\"state_name\"] = specific_date_df[\"state\"] for state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state_name'] =", "in the geojson 
specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Northern Mariana Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"]", "pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1: per10k.append(1) else: per10k.append(10000 *", "= specific_date_df['state_name'].replace(state_id, state) county_and_state = [] for index, row in specific_date_df.iterrows(): c_and_s =", "row[\"state\"] == id_: # print(id_) total += row[\"cases\"] list_state_count.append(total) # break print(list_state_count) print(len(list_state_count))", "Image, ImageDraw # import PIL # import os # images = [] #", "# if row[\"Geographic Area\"] == \"District of Columbia\": # # # print(\"sure\") #yes", "print(item) log10_per10k.append(math.log10(item)) specific_date_df[\"log10_per10k\"] = log10_per10k # import math log10_per100k = [] for item", "specific_date_df[\"per100k\"] = per100k # # print(specific_date_df) # # print(per100k) # per10k = []", "date with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response) # print(fips_states) fips_states_keys = list(fips_states.keys())", "\"county_and_state\", hover_data = [\"county_population\", \"cases\", \"cases_per100k\", \"cases_per_log10_per100k\", \"deaths\", \"IFR\"], scope=\"usa\", labels = {'cases_per_log10_per100k':", "# specific_date_df[\"log10_per100k\"] = log10_per100k # copy_df = specific_date_df.copy() # this is to remove", "# import PIL # import os # images = [] # directory =", "sep='\\t') # # print(pop_counties) # # print(pop_counties[\"Geographic Area\"]) # # pop_counties[\"Geographic Area\"] =", "import plot import os import math if not os.path.exists(\"images_counties\"): os.mkdir(\"images_counties\") with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as", 
"specific_date_df[\"county_population\"] = population_counties_list # # print(specific_date_df) # per100k = [] # for pop,", "for index, row in specific_date_df.iterrows(): # # print(index) # if row[\"state\"] == state:", "(count/pop)) specific_date_df[\"cases_per100k\"] = per100k # print(specific_date_df) # print(per100k) per10k = [] for pop,", "response: fips_states = json.load(response) fips_states_keys = list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys = [w.replace('ogia',", "if state in row[\"Geographic Area\"]: # if row[\"Geographic Area\"].find(state) > 1: # counties", "per100k.append(100000 * (count/pop)) # specific_date_df[\"per100k\"] = per100k # # print(specific_date_df) # # print(per100k)", "Area\"] = pop_counties[\"Geographic Area\"].str.replace('District of Columbia District of Columbia', 'District of Columbia') #", "= str(s_id) # print(county_id[57]) # print(state_id[600]) for c,s in zip(county_id, state_id): fips_county_ids.append(s +", "# fig.write_image(\"images_counties/\"+new_date+\"_county_per100k.png\") fig.write_image(\"C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/pages/static/current_counties.png\") html_header = \"\"\" {% extends 'base.html' %} {% block content", "Rico\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Guam\"] for state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state']", "# print(pop_counties.shape) # states_col_for_county_pop = [] # for index, row in pop_counties.iterrows(): #", "for state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state_name'] = specific_date_df['state_name'].replace(state_id, state) county_and_state = []", "w in fips_states_keys] THIS_FOLDER = os.path.dirname(os.path.abspath(__file__)) dfmain = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) # print(dfmain.head)", "# \"for showing data per county\" 
# my_file = os.path.join(THIS_FOLDER, 'population_counties_2019.xlsx') # pop_counties", "index, row in pop_counties.iterrows(): # for state in fips_states_keys: # if state in", "# # print(per10k) # # import math # log10_per10k = [] # for", "<filename>us_counties_death_per_cases.py # -*- coding: utf-8 -*- \"\"\" Created on Mon Jun 29 15:54:28", "Area\"].find(state) > 1: # counties = row[\"Geographic Area\"].replace(state, '') # if state ==", "for index, row in specific_date_df.iterrows(): if row[\"state\"] == id_: # print(id_) total +=", "as response: states_mapping = json.load(response) print(states_mapping[\"features\"][0][\"properties\"][\"STATE\"]) print(len(states_mapping[\"features\"])) #3221 # per state fig =", "# # break # # print(len(counties_list)) # # print((counties_list)) # pop_counties[\"Geographic Area\"] =", "# color_continuous_scale=\"Viridis\", # color_continuous_scale=\"hot\", # color_continuous_scale=\"ice\", # color_continuous_scale=\"thermal\", color_continuous_scale=[[0.0,'rgb(0,0,200)'], [0.3, 'rgb(149,207,216)'], [0.5, 'rgb(234,252,258)'],", "= specific_date_df[\"state\"] for state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state_name'] = specific_date_df['state_name'].replace(state_id, state) county_and_state", "per county fig = px.choropleth(copy_df, geojson=counties, locations='fips', color='cases_per_log10_per100k', # color_continuous_scale=\"icefire\", # winner #", "data per county\" # my_file = os.path.join(THIS_FOLDER, 'population_counties_2019.xlsx') # pop_counties = pd.read_excel(open(my_file, 'rb'),", "specific_date_df.loc[df.index[index]] = df.iloc[index] # print(specific_date_df) # has all data for current date #", "scope=\"usa\", labels={'per100k':'cases per 100k'} ) fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0}) fig.show() plot(fig) return fig #%% def counties_heat_map(specific_date_df,", "2019 Census Estimations<br>'+date # ) # # fig.show() # # plot(fig) # return", "# 
specific_date_df[\"county_population\"] = population_counties_list # # print(specific_date_df) # per100k = [] # for", "specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia spec_fips = list(specific_date_df[\"fips\"]) odd_balls = [] # unknown county", "counter = 0 for spec_fips in spec_fips: boo = True for fips_census in", "in pop_counties.iterrows(): # for state in fips_states_keys: # if state in row[\"Geographic Area\"]:", "for n, c_id in enumerate(county_id): if c_id < 10: county_id[n] = \"00\"+str(c_id) elif", "row in specific_date_df.iterrows(): if row[\"cases\"] > 0: IFR = row[\"deaths\"] / row[\"cases\"] IFR_list.append(IFR)", "True # new_date = '2020-06-30' print(\"Date: \", new_date) # df, current_date = load_data(when", "\"cases_per_log10_per100k\", \"deaths\", \"IFR\"], scope=\"usa\", labels = {'cases_per_log10_per100k': 'log(cases/100k)'} ) fig.update_layout(margin={\"r\":5,\"t\":20,\"l\":5,\"b\":5}, title_text = '<br><br>Covid-19", "# range_color=(0, 5), # # locationmode = 'USA-states', # featureidkey = \"id\", #", "in specific_date_df.iterrows(): if row[\"cases\"] > 0: IFR = row[\"deaths\"] / row[\"cases\"] IFR_list.append(IFR) else:", "for index, row in pop_counties.iterrows(): # for state in fips_states_keys: # if state", "\"state_name\", scope=\"usa\", labels={'per100k':'cases per 100k'} ) fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0}) fig.show() plot(fig) return fig #%% def", "print(\"hi\") # f = Image.open('C:/Users/karas/.spyder-py3/coronavirus/images_counties/'+filename) # # f = f.save(filename) # images.append(f) #", "p_county and s_state == p_state: # population_counties_list.append(population_per_county[pop_county.index(p_county)]) # boo = False # #", "# print(per100k) # per10k = [] # for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]):", "for state in states_only_df[\"state_name\"]: for i,row in pop_states.iterrows(): if row[\"State\"] == state: 
pop_list.append(row[\"Population\"])", "for item in per10k: # print(item) log10_per10k.append(math.log10(item)) specific_date_df[\"log10_per10k\"] = log10_per10k # import math", "in row[\"Geographic Area\"]: # states_col_for_county_pop.append(one_state) # # print(len(states_col_for_county_pop)) # # print(states_col_for_county_pop) # pop_counties[\"state\"]", "Per county geojson with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties = json.load(response) # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) #", "pop_county = list(pop_counties[\"Geographic Area\"]) # pop_state = list(pop_counties[\"state\"]) # population_per_county = list(pop_counties[2019]) #", "fig.show() # plot(fig,filename='covid_counties_'+date+'.html') plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html') # plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/'+date+'_counties.html') # plot(fig) return fig #%% def main():", "geojson specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Northern Mariana Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Virgin", "Area\"]: # # if \"District \" in value: # # print(value) # #", "# fig.show() # plot(fig,filename='covid_counties_'+date+'.html') plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html') # plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/'+date+'_counties.html') # plot(fig) return fig #%% def", "os import math if not os.path.exists(\"images_counties\"): os.mkdir(\"images_counties\") with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states =", "'' for i in range(dfmain.shape[0]): new_date = dfmain[\"date\"][i] if i%50 == 0 and", "Total 
Cases Per 100k Population Per<br>County Using 2019 Census Estimations<br>'+date, titlefont = {\"size\":", "states_only_df[\"per100k\"] = per100k print(states_only_df) with open('gz_2010_us_040_00_20m.json') as response: states_mapping = json.load(response) print(states_mapping[\"features\"][0][\"properties\"][\"STATE\"]) print(len(states_mapping[\"features\"]))", "'') # DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('Parish', '') # DistrictOfColumbia #", "boo = False # counter += 1 if boo == True: population_counties_list.append(1) odd_balls.append(spec_fips)", "yesterday=True) # current_date = new_date specific_date_df = make_df_for_date(input_date = current_date, df = df)", "log10_per100k = [] # for item in per100k: # # print(item) # log10_per100k.append(math.log10(item))", "endblock %}') #%% def load_data(when = 0, yesterday=True): df = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str})", "states_only_df = pd.DataFrame() list_state_count = [] list_str_states = list(specific_date_df[\"state\"].unique()) # print(list_str_states) for id_", "\"cases_per100k\", \"cases_per_log10_per100k\", \"deaths\", \"IFR\"], scope=\"usa\", labels = {'cases_per_log10_per100k': 'log(cases/100k)'} ) fig.update_layout(margin={\"r\":5,\"t\":20,\"l\":5,\"b\":5}, title_text =", "# hover_name = \"county\", # scope=\"usa\", # ) # fig.update_layout(margin={\"r\":5,\"t\":5,\"l\":5,\"b\":5}, # title_text =", "log10_per100k = [] for item in per100k: # print(item) log10_per100k.append(math.log10(item)) specific_date_df[\"cases_per_log10_per100k\"] = log10_per100k", "new_date) # states_heat_map(specific_date_df): # fig.write_image(\"images_counties/\"+new_date+\"_county_per100k.png\") fig.write_image(\"C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/pages/static/current_counties.png\") html_header = \"\"\" {% extends 'base.html' %}", "= log10_per10k # # import 
math # log10_per100k = [] # for item", "1]) # print(dfmain[\"date\"][1]) # current_date = dfmain[\"date\"][dfmain.shape[0] - 1] # 6/29/2020, or yesterday", "in zip(states_only_df[\"state_pop\"], states_only_df[\"per_state_count\"]): per100k.append(100000 * (count/pop)) states_only_df[\"per100k\"] = per100k print(states_only_df) with open('gz_2010_us_040_00_20m.json') as", "hover_name = \"county_and_state\", hover_data = [\"county_population\", \"cases\", \"cases_per100k\", \"cases_per_log10_per100k\", \"deaths\", \"IFR\"], scope=\"usa\", labels", "print(index) # # print(copy_new_df.index[index]) # specific_date_Georgia_df.loc[index_counter] = specific_date_df.iloc[index] # index_counter += 1 #", "load_data(when = 0, yesterday=True): df = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) current_date = '' if", "in fips_states_keys] THIS_FOLDER = os.path.dirname(os.path.abspath(__file__)) dfmain = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) # print(dfmain.head) #", "per100k # # print(specific_date_df) # # print(per100k) # per10k = [] # for", "current date # 3067 x 6 # specific_date_df = specific_date_df.copy() IFR_list = []", "!= 0] # # Per county geojson # with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: #", "Area\"].str.replace('.', '') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(',', '') # # pop_counties[\"Geographic Area\"]", "dfmain = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) # print(dfmain.head) # print(dfmain.shape) # print(dfmain[\"date\"][dfmain.shape[0] - 1])", "'2020-06-30' print(\"Date: \", new_date) # df, current_date = load_data(when = i, yesterday=False) df,", "print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # # 
print((counties[\"features\"][0])) #3221 # # per county # fig = px.choropleth(copy_df,", "\"color\":\"White\"}, autosize = False, width = 800, height = 650 ) # fig.show()", "\"for showing data per county\" my_file = os.path.join(THIS_FOLDER, 'all_census_data.csv') pop_counties = pd.read_csv(open(my_file)) #", "remove data from census that is missing from covid copy_df = copy_df[copy_df['cases_per_log10_per100k'] !=", "5), # # locationmode = 'USA-states', # featureidkey = \"id\", # hover_name =", "print(specific_date_df) # # print(per10k) # # import math # log10_per10k = [] #", "County', '') # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '') # #", "featureidkey = \"properties.STATE\", hover_name = \"state_name\", scope=\"usa\", labels={'per100k':'cases per 100k'} ) fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0}) fig.show()", "list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia') for w in fips_states_keys] THIS_FOLDER = os.path.dirname(os.path.abspath(__file__)) dfmain", "== \"District of Columbia\": # # print(\"aye\") # # print(one_state) # one_state =", "i, yesterday=False) df, current_date = load_data(when = i, yesterday=True) # current_date = new_date", "# else: # per10k.append(10000 * (count/pop)) # specific_date_df[\"per10k\"] = per10k # # print(specific_date_df)", "'') # # break # # print(len(counties_list)) # # print((counties_list)) # pop_counties[\"Geographic Area\"]", "Area\"].str.replace('Parish', '') # DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '') #", "= str(c_id) for n, s_id in enumerate(state_id): if s_id < 10: state_id[n] =", "\", new_date) # df, current_date = load_data(when = i, yesterday=False) df, current_date =", "in specific_date_df.iterrows(): if row[\"state\"] == id_: # print(id_) total += row[\"cases\"] list_state_count.append(total) #", "= i, yesterday=True) # current_date = new_date specific_date_df = 
make_df_for_date(input_date = current_date, df", "\"District of Columbia\": # # print(\"trouble maker\") # # print(counties) # counties =", "print(\"yes\") # # print(index) # # print(copy_new_df.index[index]) # specific_date_Georgia_df.loc[index_counter] = specific_date_df.iloc[index] # index_counter", "# print(county_id[57]) # print(state_id[600]) for c,s in zip(county_id, state_id): fips_county_ids.append(s + c) #", "pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '') # DistrictOfColumbia # spec_county = list(specific_date_df[\"county\"])", "s_county, s_state in zip(spec_county, spec_state): # boo = True # for p_county, p_state", "> 1: # counties = row[\"Geographic Area\"].replace(state, '') # if state == \"District", "= load_data(when = i, yesterday=True) # current_date = new_date specific_date_df = make_df_for_date(input_date =", "Area\"] = pop_counties[\"Geographic Area\"].map(lambda x: x.lstrip('. ,').rstrip('aAbBcC')) # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('.',", "False # # counter += 1 # if boo == True: # population_counties_list.append(1)", "/ row[\"cases\"] IFR_list.append(IFR) else: IFR_list.append(0) specific_date_df[\"IFR\"] = IFR_list # print(specific_date_df) specific_date_df = specific_date_df.reset_index(drop=True)", "\"\"\" Created on Mon Jun 29 15:54:28 2020 https://plotly.com/python/county-choropleth/?fbclid=IwAR1xOTSniBA_d1okZ-xEOa8eEeapK8AFTgWILshAnEvfLgJQPAhHgsVCIBE https://www.kaggle.com/fireballbyedimyrnmom/us-counties-covid-19-dataset better census data", "# has all data for current date with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states =", "= json.load(response) print(states_mapping[\"features\"][0][\"properties\"][\"STATE\"]) print(len(states_mapping[\"features\"])) #3221 # per state fig = px.choropleth(states_only_df, geojson=states_mapping, 
locations='state_id',", "row[\"Geographic Area\"].replace(state, '') # if state == \"District of Columbia\": # # print(\"trouble", "in pop_counties.iterrows(): # # if row[\"state\"] in row[\"Geographic Area\"]: # # # print(\"oh", "+= 1 # if boo == True: # population_counties_list.append(1) # # print(len(population_counties_list)) #", "# pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].map(lambda x: x.lstrip('. ,').rstrip('aAbBcC')) # pop_counties[\"Geographic Area\"] =", "specific_date_df[\"per10k\"] = per10k # # print(specific_date_df) # # print(per10k) # # import math", "# # if \"District \" in value: # # print(value) # # for", "specific_date_df[specific_date_df[\"state\"] != \"Northern Mariana Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Virgin Islands\"] specific_date_df =", "# pop_county = list(pop_counties[\"Geographic Area\"]) # pop_state = list(pop_counties[\"state\"]) # population_per_county = list(pop_counties[2019])", "input_date: specific_date_df.loc[df.index[index]] = df.iloc[index] # print(specific_date_df) # has all data for current date", "= log10_per100k # copy_df = specific_date_df.copy() # this is to remove data from", "population_counties_list.append(population_per_county[pop_county.index(p_county)]) # boo = False # # counter += 1 # if boo", "index, row in pop_counties.iterrows(): # # if row[\"state\"] in row[\"Geographic Area\"]: # #", "per100k = [] for pop, count in zip(states_only_df[\"state_pop\"], states_only_df[\"per_state_count\"]): per100k.append(100000 * (count/pop)) states_only_df[\"per100k\"]", "# print(fips_county_ids[1]) # print(len(county_id)) # print(len(state_id)) # print(len(fips_county_ids)) specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') #", "with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties = json.load(response) # 
print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # print((counties[\"features\"][0])) #3221 #", "# # print(specific_date_df) # # print(per100k) # per10k = [] # for pop,", "scope=\"usa\", # ) # fig.update_layout(margin={\"r\":5,\"t\":5,\"l\":5,\"b\":5}, # title_text = '<br><br>Covid-19 Spread Per 100k Population", "color_continuous_scale=\"ice\", # color_continuous_scale=\"thermal\", color_continuous_scale=[[0.0,'rgb(0,0,200)'], [0.3, 'rgb(149,207,216)'], [0.5, 'rgb(234,252,258)'], [0.6, 'rgb(255,210,0)'], [1.0, 'rgb(200,0,0)']], range_color=(0,", "# locationmode = 'USA-states', featureidkey = \"id\", hover_name = \"county_and_state\", hover_data = [\"county_population\",", "[] for pop, count in zip(states_only_df[\"state_pop\"], states_only_df[\"per_state_count\"]): per100k.append(100000 * (count/pop)) states_only_df[\"per100k\"] = per100k", "= list_str_states states_only_df[\"state_name\"] = fips_states_keys print(states_only_df) my_file = os.path.join(THIS_FOLDER, 'population_states_2019.txt') pop_states = pd.read_csv(my_file,", "counties_heat_map(specific_date_df, date): \"for showing data per county\" my_file = os.path.join(THIS_FOLDER, 'all_census_data.csv') pop_counties =", "# print(len(counties_list)) # # print((counties_list)) # pop_counties[\"Geographic Area\"] = counties_list # # print(pop_counties)", "#%% if __name__ == \"__main__\": main() # #%% # from PIL import Image,", "in enumerate(state_id): if s_id < 10: state_id[n] = \"0\"+str(s_id) else: state_id[n] = str(s_id)", "pop_counties[\"Geographic Area\"] = counties_list # # print(pop_counties) # # for index, row in", "pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if pop == 1: per100k.append(1) else: per100k.append(100000 *", "in states_only_df[\"state_name\"]: for i,row in pop_states.iterrows(): if row[\"State\"] == state: pop_list.append(row[\"Population\"]) states_only_df[\"state_pop\"] =", "\"county\", # scope=\"usa\", # ) # 
fig.update_layout(margin={\"r\":5,\"t\":5,\"l\":5,\"b\":5}, # title_text = '<br><br>Covid-19 Spread Per", "= dfmain[\"date\"][dfmain.shape[0] - 1] # 6/29/2020, or yesterday # current_date = df[\"date\"][10] #", "zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop == 1: # per10k.append(1) # else: # per10k.append(10000", "in os.listdir(directory): # # print(\"hi\") # f = Image.open('C:/Users/karas/.spyder-py3/coronavirus/images_counties/'+filename) # # f =", "if i%50 == 0 and new_date != old_date: old_date = new_date new_date =", "\"00\"+str(c_id) elif c_id < 100: county_id[n] = \"0\"+str(c_id) else: county_id[n] = str(c_id) for", "[] # for item in per100k: # # print(item) # log10_per100k.append(math.log10(item)) # specific_date_df[\"log10_per100k\"]", "!= \"Northern Mariana Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Virgin Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"]", "= \"Georgia\" # index_counter = 0 # for index, row in specific_date_df.iterrows(): #", "# for index, row in pop_counties.iterrows(): # # if row[\"Geographic Area\"] == \"District", "print(\"oh yeah\") # # row[\"Geographic Area\"].replace(row[\"state\"], '') # # break # # print(len(counties_list))", "= \"state_name\", scope=\"usa\", labels={'per100k':'cases per 100k'} ) fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0}) fig.show() plot(fig) return fig #%%", "import math log10_per10k = [] for item in per10k: # print(item) log10_per10k.append(math.log10(item)) specific_date_df[\"log10_per10k\"]", "pop_counties = pd.read_excel(open(my_file, 'rb'), index_col=None, sep='\\t') # # print(pop_counties) # # print(pop_counties[\"Geographic Area\"])", "for item in per100k: # # print(item) # log10_per100k.append(math.log10(item)) # specific_date_df[\"log10_per100k\"] = log10_per100k", "# # print(\"huzzah\") # if state == \"District of Columbia\": # # print(\"aye\")", "= pd.read_excel(open(my_file, 'rb'), 
index_col=None, sep='\\t') # # print(pop_counties) # # print(pop_counties[\"Geographic Area\"]) #", "# # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '') # # print(pop_counties) #", "plotly from plotly.offline import plot import os import math if not os.path.exists(\"images_counties\"): os.mkdir(\"images_counties\")", "import os import math if not os.path.exists(\"images_counties\"): os.mkdir(\"images_counties\") with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states", "# # for index, row in pop_counties.iterrows(): # # if row[\"state\"] in row[\"Geographic", "color='cases_per_log10_per100k', # color_continuous_scale=\"icefire\", # winner # color_continuous_scale=\"Viridis\", # color_continuous_scale=\"hot\", # color_continuous_scale=\"ice\", # color_continuous_scale=\"thermal\",", "census data https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/ \"\"\" from urllib.request import urlopen import json import pandas as", "geojson with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties = json.load(response) # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # print((counties[\"features\"][0])) #3221", "print(county_id[57]) # print(state_id[600]) for c,s in zip(county_id, state_id): fips_county_ids.append(s + c) # print(fips_county_ids[1])", "import math if not os.path.exists(\"images_counties\"): os.mkdir(\"images_counties\") with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response)", "IFR_list # print(specific_date_df) specific_date_df = specific_date_df.reset_index(drop=True) # specific_date_Georgia_df = pd.DataFrame(data=None, columns=list(specific_date_df.columns.values)) # print(specific_date_df)", "# # if 
one_state == \"Distric of Columbia\": # # print(\"huzzah\") # if", "= list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia') for w in fips_states_keys] # these are", "s_id < 10: state_id[n] = \"0\"+str(s_id) else: state_id[n] = str(s_id) # print(county_id[57]) #", "counter = 0 # for s_county, s_state in zip(spec_county, spec_state): # boo =", "# # print(per100k) # per10k = [] # for pop, count in zip(specific_date_df[\"county_population\"],", "pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(',', '') # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('District", "# # print(\"sure\") #yes # # print(specific_date_df) # for state, state_id in zip(fips_states_keys,", "3067 x 6 # specific_date_df = specific_date_df.copy() IFR_list = [] for index, row", "= \"District of Columbia\" # counties_list.append(counties) # # for index, row in pop_counties.iterrows():", "= False, width = 800, height = 650 ) # fig.show() # plot(fig,filename='covid_counties_'+date+'.html')", "append_images=images[1:], optimize=False, duration=500, loop=0) #%% #Graveyard # def counties_heat_map(specific_date_df, date): # \"for showing", "# print(list_str_states) for id_ in list_str_states: total = 0 for index, row in", "# featureidkey = \"id\", # hover_name = \"county\", # scope=\"usa\", # ) #", "for s_county, s_state in zip(spec_county, spec_state): # boo = True # for p_county,", "pop_counties[\"Geographic Area\"]: # # if \"District \" in value: # # print(value) #", "states_only_df[\"state_name\"] = fips_states_keys print(states_only_df) my_file = os.path.join(THIS_FOLDER, 'population_states_2019.txt') pop_states = pd.read_csv(my_file, header=0) #", "print(list_str_states) for id_ in list_str_states: total = 0 for index, row in specific_date_df.iterrows():", "print(pop_counties[\"Geographic Area\"]) # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].map(lambda x: x.lstrip('. 
,').rstrip('aAbBcC')) #", "color_continuous_scale=\"Viridis\", # color_continuous_scale=\"hot\", # color_continuous_scale=\"ice\", # color_continuous_scale=\"thermal\", color_continuous_scale=[[0.0,'rgb(0,0,200)'], [0.3, 'rgb(149,207,216)'], [0.5, 'rgb(234,252,258)'], [0.6,", "# DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('Parish', '') # DistrictOfColumbia # pop_counties[\"Geographic", "row[\"county\"] +\", \"+ row[\"state_name\"] county_and_state.append(c_and_s) specific_date_df[\"county_and_state\"] = county_and_state return specific_date_df #%% def states_heat_map(specific_date_df):", "and s_state == p_state: # population_counties_list.append(population_per_county[pop_county.index(p_county)]) # boo = False # # counter", "urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties = json.load(response) # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # print((counties[\"features\"][0])) #3221 # per", "#%% def states_heat_map(specific_date_df): \"for showing data per state\" states_only_df = pd.DataFrame() list_state_count =", "for c,s in zip(county_id, state_id): fips_county_ids.append(s + c) # print(fips_county_ids[1]) # print(len(county_id)) #", "open('gz_2010_us_040_00_20m.json') as response: states_mapping = json.load(response) print(states_mapping[\"features\"][0][\"properties\"][\"STATE\"]) print(len(states_mapping[\"features\"])) #3221 # per state fig", "print(specific_date_df) # print(per10k) # import math log10_per10k = [] for item in per10k:", "1] # 6/29/2020, or yesterday # current_date = df[\"date\"][10] # 6/29/2020 #%% def", "yesterday # current_date = df[\"date\"][10] # 6/29/2020 #%% def line_prepender(filename, line): with open(filename,", "p_state in zip(pop_county, pop_state): # if s_county == p_county and s_state == p_state:", "specific_date_df[\"cases\"]): # if pop == 1: # per100k.append(1) # else: # 
per100k.append(100000 *", "pop_counties[\"Geographic Area\"].str.replace(' ', '') # # print(pop_counties) # # for value in pop_counties[\"Geographic", "\"Northern Mariana Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Virgin Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] !=", "plotly.offline import plot import os import math if not os.path.exists(\"images_counties\"): os.mkdir(\"images_counties\") with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json')", "def main(): #[282519 rows x 6 columns] dfmain.shape[0] old_date = '' for i", "# print(pop_counties) county_id = list(pop_counties[\"COUNTY\"]) state_id = list(pop_counties[\"STATE\"]) population_per_county = list(pop_counties[\"POPESTIMATE2019\"]) fips_county_ids =", "current_date = load_data(when = i, yesterday=False) df, current_date = load_data(when = i, yesterday=True)", "str}) # print(dfmain.head) # print(dfmain.shape) # print(dfmain[\"date\"][dfmain.shape[0] - 1]) # print(dfmain[\"date\"][1]) # current_date", "c_id < 10: county_id[n] = \"00\"+str(c_id) elif c_id < 100: county_id[n] = \"0\"+str(c_id)", "plot_bgcolor='#4E5D6C', geo=dict(bgcolor= 'rgba(0,0,0,0)', lakecolor='#4E5D6C'), font = {\"size\": 14, \"color\":\"White\"}, autosize = False, width", "math log10_per100k = [] for item in per100k: # print(item) log10_per100k.append(math.log10(item)) specific_date_df[\"cases_per_log10_per100k\"] =", "print(specific_date_df) # print(per100k) per10k = [] for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): if", "# specific_date_df[\"per10k\"] = per10k # # print(specific_date_df) # # print(per10k) # # import", "for w in fips_states_keys] THIS_FOLDER = os.path.dirname(os.path.abspath(__file__)) dfmain = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) #", "geojson # with 
urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: # counties = json.load(response) # # print(counties[\"features\"][0][\"properties\"][\"STATE\"])", "i,row in pop_states.iterrows(): if row[\"State\"] == state: pop_list.append(row[\"Population\"]) states_only_df[\"state_pop\"] = pop_list # print(pop_list)", "for n, s_id in enumerate(state_id): if s_id < 10: state_id[n] = \"0\"+str(s_id) else:", "print((counties_list)) # pop_counties[\"Geographic Area\"] = counties_list # # print(pop_counties) # # for index,", "in zip(fips_states_keys, fips_states_values): specific_date_df['state'] = specific_date_df['state'].replace(state, state_id) # print(specific_date_df) specific_date_df[\"state_name\"] = specific_date_df[\"state\"] for", "pd.read_csv(open(my_file)) # print(pop_counties) county_id = list(pop_counties[\"COUNTY\"]) state_id = list(pop_counties[\"STATE\"]) population_per_county = list(pop_counties[\"POPESTIMATE2019\"]) fips_county_ids", "= {\"size\": 14, \"color\":\"White\"}, autosize = False, width = 800, height = 650", "= px.choropleth(states_only_df, geojson=states_mapping, locations='state_id', color='per100k', color_continuous_scale=\"Viridis\", # range_color=(0, 10), # locationmode = 'USA-states',", "# print(dfmain.head) # print(dfmain.shape) # print(dfmain[\"date\"][dfmain.shape[0] - 1]) # print(dfmain[\"date\"][1]) # current_date =", "f.save(filename) # images.append(f) # print(len(images)) # images[0].save('covid_timeline_county_cases.gif', # save_all=True, append_images=images[1:], optimize=False, duration=500, loop=0)", "1: per100k.append(1) else: per100k.append(100000 * (count/pop)) specific_date_df[\"cases_per100k\"] = per100k # print(specific_date_df) # print(per100k)", "row in pop_counties.iterrows(): # # if row[\"state\"] in row[\"Geographic Area\"]: # # #", "# log10_per100k.append(math.log10(item)) # specific_date_df[\"log10_per100k\"] = log10_per100k # copy_df = specific_date_df.copy() # 
this is", "for p_county, p_state in zip(pop_county, pop_state): # if s_county == p_county and s_state", "width = 800, height = 650 ) # fig.show() # plot(fig,filename='covid_counties_'+date+'.html') plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html') #", "\"for showing data per county\" # my_file = os.path.join(THIS_FOLDER, 'population_counties_2019.xlsx') # pop_counties =", "value in pop_counties[\"Geographic Area\"]: # # if \"District \" in value: # #", "= 'USA-states', featureidkey = \"id\", hover_name = \"county_and_state\", hover_data = [\"county_population\", \"cases\", \"cases_per100k\",", "in zip(fips_states_keys, fips_states_values): # pop_counties[\"state\"] = pop_counties[\"state\"].replace(state, state_id) # specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '')", "# print(item) # log10_per100k.append(math.log10(item)) # specific_date_df[\"log10_per100k\"] = log10_per100k # copy_df = specific_date_df.copy() #", "print(dfmain.head) # print(dfmain.shape) # print(dfmain[\"date\"][dfmain.shape[0] - 1]) # print(dfmain[\"date\"][1]) # current_date = dfmain[\"date\"][dfmain.shape[0]", "population_counties_list = [] # counter = 0 for spec_fips in spec_fips: boo =", "my_file = os.path.join(THIS_FOLDER, 'population_counties_2019.xlsx') # pop_counties = pd.read_excel(open(my_file, 'rb'), index_col=None, sep='\\t') # #", "# print(specific_date_df) # has all data for current date # 3067 x 6", "in per10k: # print(item) log10_per10k.append(math.log10(item)) specific_date_df[\"log10_per10k\"] = log10_per10k # import math log10_per100k =", "# print(len(state_id)) # print(len(fips_county_ids)) specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia spec_fips = list(specific_date_df[\"fips\"])", "= pd.DataFrame() list_state_count = [] list_str_states = list(specific_date_df[\"state\"].unique()) # print(list_str_states) for id_ in", "of 
Columbia\": # # print(\"trouble maker\") # # print(counties) # counties = \"District", "print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # print((counties[\"features\"][0])) #3221 # per county fig = px.choropleth(copy_df, geojson=counties, locations='fips', color='cases_per_log10_per100k',", "odd_balls.append(spec_fips) # unknown county cases # print(spec_fips) # print(len(population_counties_list)) # 3065 # print(population_counties_list)", "[1.0, 'rgb(200,0,0)']], range_color=(0, 5), # locationmode = 'USA-states', featureidkey = \"id\", hover_name =", "Georgia # state = \"Georgia\" # index_counter = 0 # for index, row", "# locationmode = 'USA-states', featureidkey = \"properties.STATE\", hover_name = \"state_name\", scope=\"usa\", labels={'per100k':'cases per", "paper_bgcolor='#4E5D6C', plot_bgcolor='#4E5D6C', geo=dict(bgcolor= 'rgba(0,0,0,0)', lakecolor='#4E5D6C'), font = {\"size\": 14, \"color\":\"White\"}, autosize = False,", "Area\"]: # if row[\"Geographic Area\"].find(state) > 1: # counties = row[\"Geographic Area\"].replace(state, '')", "dtype={\"fips\": str}) current_date = '' if yesterday: current_date = df[\"date\"][df.shape[0] - 1] #", "s_id in enumerate(state_id): if s_id < 10: state_id[n] = \"0\"+str(s_id) else: state_id[n] =", "= new_date new_date = dfmain[\"date\"][dfmain.shape[0] - 1] # if yesterday = True #", "# import math # log10_per100k = [] # for item in per100k: #", "# for item in per10k: # # print(item) # log10_per10k.append(math.log10(item)) # specific_date_df[\"log10_per10k\"] =", "case Georgia # state = \"Georgia\" # index_counter = 0 # for index,", "print(item) log10_per100k.append(math.log10(item)) specific_date_df[\"cases_per_log10_per100k\"] = log10_per100k copy_df = specific_date_df.copy() # this is to remove", "fig.write_image(\"C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/pages/static/current_counties.png\") html_header = \"\"\" {% extends 'base.html' %} {% block content %} <body", "per100k = [] # for 
pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop", "new_date) # df, current_date = load_data(when = i, yesterday=False) df, current_date = load_data(when", "= 650 ) # fig.show() # plot(fig,filename='covid_counties_'+date+'.html') plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html') # plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/'+date+'_counties.html') # plot(fig) return", "Per County<br>Using 2019 Census Estimations<br>'+date # ) # # fig.show() # # plot(fig)", "-*- coding: utf-8 -*- \"\"\" Created on Mon Jun 29 15:54:28 2020 https://plotly.com/python/county-choropleth/?fbclid=IwAR1xOTSniBA_d1okZ-xEOa8eEeapK8AFTgWILshAnEvfLgJQPAhHgsVCIBE", "list(specific_date_df[\"state\"]) # pop_county = list(pop_counties[\"Geographic Area\"]) # pop_state = list(pop_counties[\"state\"]) # population_per_county =", "list_state_count states_only_df[\"state_id\"] = list_str_states states_only_df[\"state_name\"] = fips_states_keys print(states_only_df) my_file = os.path.join(THIS_FOLDER, 'population_states_2019.txt') pop_states", "pop == 1: # per10k.append(1) # else: # per10k.append(10000 * (count/pop)) # specific_date_df[\"per10k\"]", "# this is to remove data from census that is missing from covid", "Spread Per 100k Population Per County<br>Using 2019 Census Estimations<br>'+date # ) # #", "# current_date = dfmain[\"date\"][dfmain.shape[0] - 1] # 6/29/2020, or yesterday # current_date =", "item: # # print(item) # # print(pop_counties.shape) # states_col_for_county_pop = [] # for", "# pop_counties[\"state\"] = pop_counties[\"state\"].replace(state, state_id) # specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia #", "specific_date_df[\"county_population\"] = population_counties_list per100k = [] for pop, count in zip(specific_date_df[\"county_population\"], 
specific_date_df[\"cases\"]): if", "pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' County', '') # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic", "specific_date_df = pd.DataFrame(data=None, columns=list(df.columns.values)) for index, row in df.iterrows(): if row[\"date\"] == input_date:", "# -*- coding: utf-8 -*- \"\"\" Created on Mon Jun 29 15:54:28 2020", "# for picking out a specific state, in this case Georgia # state", "== state: # # print(\"yes\") # # print(index) # # print(copy_new_df.index[index]) # specific_date_Georgia_df.loc[index_counter]", "log10_per10k = [] for item in per10k: # print(item) log10_per10k.append(math.log10(item)) specific_date_df[\"log10_per10k\"] = log10_per10k", "index, row in specific_date_df.iterrows(): if row[\"cases\"] > 0: IFR = row[\"deaths\"] / row[\"cases\"]", "#3221 # per county fig = px.choropleth(copy_df, geojson=counties, locations='fips', color='cases_per_log10_per100k', # color_continuous_scale=\"icefire\", #", "math log10_per10k = [] for item in per10k: # print(item) log10_per10k.append(math.log10(item)) specific_date_df[\"log10_per10k\"] =", "#%% def load_data(when = 0, yesterday=True): df = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) current_date =", "for index, row in pop_counties.iterrows(): # one_state = '' # for state in", "that is missing from covid copy_df = copy_df[copy_df['cases_per_log10_per100k'] != 0] # Per county", "state_id[n] = \"0\"+str(s_id) else: state_id[n] = str(s_id) # print(county_id[57]) # print(state_id[600]) for c,s", "# specific_date_df[\"per100k\"] = per100k # # print(specific_date_df) # # print(per100k) # per10k =", "0) f.write(line.rstrip('\\r\\n') + '\\n' + content) f.write('\\n{% endblock %}') #%% def load_data(when =", "\"deaths\", \"IFR\"], scope=\"usa\", labels = {'cases_per_log10_per100k': 'log(cases/100k)'} ) 
fig.update_layout(margin={\"r\":5,\"t\":20,\"l\":5,\"b\":5}, title_text = '<br><br>Covid-19 Total", "# if one_state in row[\"Geographic Area\"]: # states_col_for_county_pop.append(one_state) # # print(len(states_col_for_county_pop)) # #", "fips_census: population_counties_list.append(population_per_county[fips_county_ids.index(fips_census)]) boo = False # counter += 1 if boo == True:", "state: pop_list.append(row[\"Population\"]) states_only_df[\"state_pop\"] = pop_list # print(pop_list) # print(len(pop_list)) per100k = [] for", "= {\"size\": 15, \"color\":\"White\"}, paper_bgcolor='#4E5D6C', plot_bgcolor='#4E5D6C', geo=dict(bgcolor= 'rgba(0,0,0,0)', lakecolor='#4E5D6C'), font = {\"size\": 14,", "per100k.append(100000 * (count/pop)) specific_date_df[\"cases_per100k\"] = per100k # print(specific_date_df) # print(per100k) per10k = []", "#%% #Graveyard # def counties_heat_map(specific_date_df, date): # \"for showing data per county\" #", "maker\") # # print(counties) # counties = \"District of Columbia\" # counties_list.append(counties) #", "# # print(specific_date_df) # for state, state_id in zip(fips_states_keys, fips_states_values): # pop_counties[\"state\"] =", "\"Puerto Rico\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Guam\"] for state, state_id in zip(fips_states_keys, fips_states_values):", "'<br><br>Covid-19 Spread Per 100k Population Per County<br>Using 2019 Census Estimations<br>'+date # ) #", "# plot(fig,filename='covid_counties_'+date+'.html') plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html') # plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/'+date+'_counties.html') # plot(fig) return fig #%% def main(): #[282519", "= new_date specific_date_df = make_df_for_date(input_date = current_date, df = df) fig = counties_heat_map(specific_date_df,", "# print(len(images)) # images[0].save('covid_timeline_county_cases.gif', # 
save_all=True, append_images=images[1:], optimize=False, duration=500, loop=0) #%% #Graveyard #", "fips_county_ids.append(s + c) # print(fips_county_ids[1]) # print(len(county_id)) # print(len(state_id)) # print(len(fips_county_ids)) specific_date_df[\"county\"] =", "= [] # for item in per100k: # # print(item) # log10_per100k.append(math.log10(item)) #", "def load_data(when = 0, yesterday=True): df = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) current_date = ''", "= list(pop_counties[\"POPESTIMATE2019\"]) fips_county_ids = [] for n, c_id in enumerate(county_id): if c_id <", "= pop_counties[\"Geographic Area\"].str.replace('Parish', '') # DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ',", "# pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '') # # print(pop_counties) # #", "= pop_counties[\"Geographic Area\"].str.replace(' ', '') # DistrictOfColumbia # spec_county = list(specific_date_df[\"county\"]) # spec_state", "+= 1 # # print(index_counter) # print(specific_date_Georgia_df) # has all data for current", "print(\"huzzah\") # if state == \"District of Columbia\": # # print(\"aye\") # #", "100k Population Per<br>County Using 2019 Census Estimations<br>'+date, titlefont = {\"size\": 15, \"color\":\"White\"}, paper_bgcolor='#4E5D6C',", "state_id = list(pop_counties[\"STATE\"]) population_per_county = list(pop_counties[\"POPESTIMATE2019\"]) fips_county_ids = [] for n, c_id in", "from covid copy_df = copy_df[copy_df['cases_per_log10_per100k'] != 0] # Per county geojson with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json')", "= False # # counter += 1 # if boo == True: #", "county geojson with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties = json.load(response) # 
print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # print((counties[\"features\"][0]))", "per10k.append(10000 * (count/pop)) # specific_date_df[\"per10k\"] = per10k # # print(specific_date_df) # # print(per10k)", "\"0\"+str(s_id) else: state_id[n] = str(s_id) # print(county_id[57]) # print(state_id[600]) for c,s in zip(county_id,", "row[\"Geographic Area\"]: # if row[\"Geographic Area\"].find(state) > 1: # counties = row[\"Geographic Area\"].replace(state,", "featureidkey = \"id\", hover_name = \"county_and_state\", hover_data = [\"county_population\", \"cases\", \"cases_per100k\", \"cases_per_log10_per100k\", \"deaths\",", "= {'cases_per_log10_per100k': 'log(cases/100k)'} ) fig.update_layout(margin={\"r\":5,\"t\":20,\"l\":5,\"b\":5}, title_text = '<br><br>Covid-19 Total Cases Per 100k Population", "= json.load(response) # # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # # print((counties[\"features\"][0])) #3221 # # per county", "specific state, in this case Georgia # state = \"Georgia\" # index_counter =", "# for state, state_id in zip(fips_states_keys, fips_states_values): # pop_counties[\"state\"] = pop_counties[\"state\"].replace(state, state_id) #", "Area\"].str.replace('District of Columbia District of Columbia', 'District of Columbia') # pop_counties[\"Geographic Area\"] =", "= [] # # counter = 0 # for s_county, s_state in zip(spec_county,", "2019 Census Estimations<br>'+date, titlefont = {\"size\": 15, \"color\":\"White\"}, paper_bgcolor='#4E5D6C', plot_bgcolor='#4E5D6C', geo=dict(bgcolor= 'rgba(0,0,0,0)', lakecolor='#4E5D6C'),", "# print(len(states_col_for_county_pop)) # # print(states_col_for_county_pop) # pop_counties[\"state\"] = states_col_for_county_pop # # print(pop_counties) #", "for w in fips_states_keys] # these are in the data from kaggle but", "main(): #[282519 rows x 6 columns] dfmain.shape[0] old_date = '' for i in", "fips_states_values): specific_date_df['state_name'] = 
specific_date_df['state_name'].replace(state_id, state) county_and_state = [] for index, row in specific_date_df.iterrows():", "'') # DistrictOfColumbia # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '') # DistrictOfColumbia", "= [] list_str_states = list(specific_date_df[\"state\"].unique()) # print(list_str_states) for id_ in list_str_states: total =", "county cases # print(spec_fips) # print(len(population_counties_list)) # 3065 # print(population_counties_list) specific_date_df[\"county_population\"] = population_counties_list", "# pop_state = list(pop_counties[\"state\"]) # population_per_county = list(pop_counties[2019]) # population_counties_list = [] #", "1] # 6/29/2020 else: current_date = df[\"date\"][when] return df, current_date def make_df_for_date(input_date, df):", "650 ) # fig.show() # plot(fig,filename='covid_counties_'+date+'.html') plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html') # plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/'+date+'_counties.html') # plot(fig) return fig", "s_state == p_state: # population_counties_list.append(population_per_county[pop_county.index(p_county)]) # boo = False # # counter +=", "# # for item in pop_counties[\"Geographic Area\"]: # # if \"Virginia\" in item:", "PIL import Image, ImageDraw # import PIL # import os # images =", "\"District of Columbia\" # counties_list.append(counties) # # for index, row in pop_counties.iterrows(): #", "specific_date_df.iterrows(): if row[\"state\"] == id_: # print(id_) total += row[\"cases\"] list_state_count.append(total) # break", "Area\"].replace(row[\"state\"], '') # # break # # print(len(counties_list)) # # print((counties_list)) # pop_counties[\"Geographic", "== input_date: specific_date_df.loc[df.index[index]] = df.iloc[index] # print(specific_date_df) # has all data for current", "# print(\"hi\") # f = 
Image.open('C:/Users/karas/.spyder-py3/coronavirus/images_counties/'+filename) # # f = f.save(filename) # images.append(f)", "# # counter = 0 # for s_county, s_state in zip(spec_county, spec_state): #", "DistrictOfColumbia # spec_county = list(specific_date_df[\"county\"]) # spec_state = list(specific_date_df[\"state\"]) # pop_county = list(pop_counties[\"Geographic", "for item in per100k: # print(item) log10_per100k.append(math.log10(item)) specific_date_df[\"cases_per_log10_per100k\"] = log10_per100k copy_df = specific_date_df.copy()", "list(specific_date_df[\"fips\"]) odd_balls = [] # unknown county cases population_counties_list = [] # counter", "# color_continuous_scale=\"Viridis\", # range_color=(0, 5), # # locationmode = 'USA-states', # featureidkey =", "= list(specific_date_df[\"fips\"]) odd_balls = [] # unknown county cases population_counties_list = [] #", "1: per10k.append(1) else: per10k.append(10000 * (count/pop)) specific_date_df[\"per10k\"] = per10k # print(specific_date_df) # print(per10k)", "x 6 # specific_date_df = specific_date_df.copy() IFR_list = [] for index, row in", "= list(specific_date_df[\"state\"]) # pop_county = list(pop_counties[\"Geographic Area\"]) # pop_state = list(pop_counties[\"state\"]) # population_per_county", "state == \"District of Columbia\": # # print(\"aye\") # # print(one_state) # one_state", ") fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0}) fig.show() plot(fig) return fig #%% def counties_heat_map(specific_date_df, date): \"for showing data", "print(index) # if row[\"state\"] == state: # # print(\"yes\") # # print(index) #", "fig.update_layout(margin={\"r\":5,\"t\":20,\"l\":5,\"b\":5}, title_text = '<br><br>Covid-19 Total Cases Per 100k Population Per<br>County Using 2019 Census", "# print(specific_date_Georgia_df) # # for picking out a specific state, in this case", "better census data https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/ \"\"\" from urllib.request import 
urlopen import json import pandas", "# # locationmode = 'USA-states', # featureidkey = \"id\", # hover_name = \"county\",", "# print(dfmain.shape) # print(dfmain[\"date\"][dfmain.shape[0] - 1]) # print(dfmain[\"date\"][1]) # current_date = dfmain[\"date\"][dfmain.shape[0] -", "per100k.append(100000 * (count/pop)) states_only_df[\"per100k\"] = per100k print(states_only_df) with open('gz_2010_us_040_00_20m.json') as response: states_mapping =", "counties_heat_map(specific_date_df, new_date) # states_heat_map(specific_date_df): # fig.write_image(\"images_counties/\"+new_date+\"_county_per100k.png\") fig.write_image(\"C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/pages/static/current_counties.png\") html_header = \"\"\" {% extends 'base.html'", "of Columbia\": # # print(\"aye\") # # print(one_state) # one_state = \"District of", "range_color=(0, 5), # locationmode = 'USA-states', featureidkey = \"id\", hover_name = \"county_and_state\", hover_data", "fips_states_values = list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia') for w in fips_states_keys] # these", "new_date != old_date: old_date = new_date new_date = dfmain[\"date\"][dfmain.shape[0] - 1] # if", "or yesterday # current_date = df[\"date\"][10] # 6/29/2020 #%% def line_prepender(filename, line): with", "str}) current_date = '' if yesterday: current_date = df[\"date\"][df.shape[0] - 1] # 6/29/2020", "log10_per100k.append(math.log10(item)) specific_date_df[\"cases_per_log10_per100k\"] = log10_per100k copy_df = specific_date_df.copy() # this is to remove data", "px.choropleth(copy_df, geojson=counties, # locations='fips', # color='log10_per100k', # # color_continuous_scale=\"Reds\", # color_continuous_scale=\"Viridis\", # range_color=(0,", "0 for spec_fips in spec_fips: boo = True for fips_census in fips_county_ids: if", "list_str_states = list(specific_date_df[\"state\"].unique()) # print(list_str_states) for id_ in list_str_states: total = 0 for", "# # print(\"aye\") # # 
print(one_state) # one_state = \"District of Columbia\" #", "print(states_only_df) my_file = os.path.join(THIS_FOLDER, 'population_states_2019.txt') pop_states = pd.read_csv(my_file, header=0) # print(pop_states[\"State\"]) # print(states_only_df)", "px import plotly from plotly.offline import plot import os import math if not", "else: current_date = df[\"date\"][when] return df, current_date def make_df_for_date(input_date, df): specific_date_df = pd.DataFrame(data=None,", "as response: # counties = json.load(response) # # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # # print((counties[\"features\"][0])) #3221", "+= row[\"cases\"] list_state_count.append(total) # break print(list_state_count) print(len(list_state_count)) states_only_df[\"per_state_count\"] = list_state_count states_only_df[\"state_id\"] = list_str_states", "total = 0 for index, row in specific_date_df.iterrows(): if row[\"state\"] == id_: #", "else: # per100k.append(100000 * (count/pop)) # specific_date_df[\"per100k\"] = per100k # # print(specific_date_df) #", "color_continuous_scale=\"hot\", # color_continuous_scale=\"ice\", # color_continuous_scale=\"thermal\", color_continuous_scale=[[0.0,'rgb(0,0,200)'], [0.3, 'rgb(149,207,216)'], [0.5, 'rgb(234,252,258)'], [0.6, 'rgb(255,210,0)'], [1.0,", "> 0: IFR = row[\"deaths\"] / row[\"cases\"] IFR_list.append(IFR) else: IFR_list.append(0) specific_date_df[\"IFR\"] = IFR_list", "per county # fig = px.choropleth(copy_df, geojson=counties, # locations='fips', # color='log10_per100k', # #", "specific_date_df[\"cases_per_log10_per100k\"] = log10_per100k copy_df = specific_date_df.copy() # this is to remove data from", "list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia') for w in fips_states_keys] #", "item in per10k: # print(item) log10_per10k.append(math.log10(item)) specific_date_df[\"log10_per10k\"] = log10_per10k # import math log10_per100k", "row[\"Geographic Area\"]: # 
if row[\"Geographic Area\"].find(state) > 1: # one_state = state #", "= pd.DataFrame(data=None, columns=list(specific_date_df.columns.values)) # print(specific_date_df) # print(specific_date_Georgia_df) # # for picking out a", "spec_fips: boo = True for fips_census in fips_county_ids: if spec_fips == fips_census: population_counties_list.append(population_per_county[fips_county_ids.index(fips_census)])", "False # counter += 1 if boo == True: population_counties_list.append(1) odd_balls.append(spec_fips) # unknown", "json import pandas as pd import plotly.express as px import plotly from plotly.offline", "in pop_counties[\"Geographic Area\"]: # # if \"District \" in value: # # print(value)", "if pop == 1: per100k.append(1) else: per100k.append(100000 * (count/pop)) specific_date_df[\"cases_per100k\"] = per100k #", "# print(item) # log10_per10k.append(math.log10(item)) # specific_date_df[\"log10_per10k\"] = log10_per10k # # import math #", "item in per100k: # print(item) log10_per100k.append(math.log10(item)) specific_date_df[\"cases_per_log10_per100k\"] = log10_per100k copy_df = specific_date_df.copy() #", "2020 https://plotly.com/python/county-choropleth/?fbclid=IwAR1xOTSniBA_d1okZ-xEOa8eEeapK8AFTgWILshAnEvfLgJQPAhHgsVCIBE https://www.kaggle.com/fireballbyedimyrnmom/us-counties-covid-19-dataset better census data https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/ \"\"\" from urllib.request import urlopen import", "# print(pop_counties[\"Geographic Area\"]) # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].map(lambda x: x.lstrip('. 
,').rstrip('aAbBcC'))", "# # print(states_col_for_county_pop) # pop_counties[\"state\"] = states_col_for_county_pop # # print(pop_counties) # counties_list =", "# if pop == 1: # per10k.append(1) # else: # per10k.append(10000 * (count/pop))", "# print(dfmain[\"date\"][1]) # current_date = dfmain[\"date\"][dfmain.shape[0] - 1] # 6/29/2020, or yesterday #", "# # for index, row in pop_counties.iterrows(): # # if row[\"Geographic Area\"] ==", "# # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].map(lambda x: x.lstrip('. ,').rstrip('aAbBcC')) # pop_counties[\"Geographic Area\"]", "= \"county_and_state\", hover_data = [\"county_population\", \"cases\", \"cases_per100k\", \"cases_per_log10_per100k\", \"deaths\", \"IFR\"], scope=\"usa\", labels =", "if yesterday = True # new_date = '2020-06-30' print(\"Date: \", new_date) # df,", "'') # DistrictOfColumbia spec_fips = list(specific_date_df[\"fips\"]) odd_balls = [] # unknown county cases", "print(len(counties_list)) # # print((counties_list)) # pop_counties[\"Geographic Area\"] = counties_list # # print(pop_counties) #", "pop_list = [] for state in states_only_df[\"state_name\"]: for i,row in pop_states.iterrows(): if row[\"State\"]", "row[\"cases\"] list_state_count.append(total) # break print(list_state_count) print(len(list_state_count)) states_only_df[\"per_state_count\"] = list_state_count states_only_df[\"state_id\"] = list_str_states states_only_df[\"state_name\"]", "# states_col_for_county_pop = [] # for index, row in pop_counties.iterrows(): # one_state =", "= row[\"deaths\"] / row[\"cases\"] IFR_list.append(IFR) else: IFR_list.append(0) specific_date_df[\"IFR\"] = IFR_list # print(specific_date_df) specific_date_df", "\"color\":\"White\"}, paper_bgcolor='#4E5D6C', plot_bgcolor='#4E5D6C', geo=dict(bgcolor= 'rgba(0,0,0,0)', lakecolor='#4E5D6C'), font = {\"size\": 14, \"color\":\"White\"}, autosize =", "= pd.read_csv(my_file, header=0) # print(pop_states[\"State\"]) # print(states_only_df) 
pop_list = [] for state in", "# # print(index) # if row[\"state\"] == state: # # print(\"yes\") # #", "county_id[n] = \"00\"+str(c_id) elif c_id < 100: county_id[n] = \"0\"+str(c_id) else: county_id[n] =", "= True # new_date = '2020-06-30' print(\"Date: \", new_date) # df, current_date =", "== 0 and new_date != old_date: old_date = new_date new_date = dfmain[\"date\"][dfmain.shape[0] -", "pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop == 1: # per10k.append(1) #", "print(item) # # print(pop_counties.shape) # states_col_for_county_pop = [] # for index, row in", "specific_date_df[specific_date_df[\"state\"] != \"Puerto Rico\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Guam\"] for state, state_id in", "# counties_list = [] # for index, row in pop_counties.iterrows(): # for state", "copy_df[copy_df['cases_per_log10_per100k'] != 0] # Per county geojson with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: counties =", "plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/'+date+'_counties.html') # plot(fig) return fig #%% def main(): #[282519 rows x 6 columns]", "if __name__ == \"__main__\": main() # #%% # from PIL import Image, ImageDraw", "if boo == True: # population_counties_list.append(1) # # print(len(population_counties_list)) # 3065 # #", "all data for current date with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response) #", "print(len(population_counties_list)) # 3065 # # print(population_counties_list) # specific_date_df[\"county_population\"] = population_counties_list # # print(specific_date_df)", "# per100k = [] # for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if", "pop_states.iterrows(): if row[\"State\"] == state: 
pop_list.append(row[\"Population\"]) states_only_df[\"state_pop\"] = pop_list # print(pop_list) # print(len(pop_list))", "Area\"]) # pop_state = list(pop_counties[\"state\"]) # population_per_county = list(pop_counties[2019]) # population_counties_list = []", "for index, row in df.iterrows(): if row[\"date\"] == input_date: specific_date_df.loc[df.index[index]] = df.iloc[index] #", "# # print(index_counter) # print(specific_date_Georgia_df) # has all data for current date with", "is to remove data from census that is missing from covid # copy_df", "\"District \" in value: # # print(value) # # for item in pop_counties[\"Geographic", "cases # print(spec_fips) # print(len(population_counties_list)) # 3065 # print(population_counties_list) specific_date_df[\"county_population\"] = population_counties_list per100k", "# # # print(\"oh yeah\") # # row[\"Geographic Area\"].replace(row[\"state\"], '') # # break", "= os.path.dirname(os.path.abspath(__file__)) dfmain = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) # print(dfmain.head) # print(dfmain.shape) # print(dfmain[\"date\"][dfmain.shape[0]", "county\" my_file = os.path.join(THIS_FOLDER, 'all_census_data.csv') pop_counties = pd.read_csv(open(my_file)) # print(pop_counties) county_id = list(pop_counties[\"COUNTY\"])", "in fips_county_ids: if spec_fips == fips_census: population_counties_list.append(population_per_county[fips_county_ids.index(fips_census)]) boo = False # counter +=", "list(pop_counties[\"STATE\"]) population_per_county = list(pop_counties[\"POPESTIMATE2019\"]) fips_county_ids = [] for n, c_id in enumerate(county_id): if", "print(\"aye\") # # print(one_state) # one_state = \"District of Columbia\" # if one_state", "print(specific_date_df) specific_date_df = specific_date_df.reset_index(drop=True) # specific_date_Georgia_df = pd.DataFrame(data=None, columns=list(specific_date_df.columns.values)) # print(specific_date_df) # 
print(specific_date_Georgia_df)", "if one_state in row[\"Geographic Area\"]: # states_col_for_county_pop.append(one_state) # # print(len(states_col_for_county_pop)) # # print(states_col_for_county_pop)", "zip(spec_county, spec_state): # boo = True # for p_county, p_state in zip(pop_county, pop_state):", "= json.load(response) fips_states_keys = list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia') for", "10: state_id[n] = \"0\"+str(s_id) else: state_id[n] = str(s_id) # print(county_id[57]) # print(state_id[600]) for", "= specific_date_df.iloc[index] # index_counter += 1 # # print(index_counter) # print(specific_date_Georgia_df) # has", "specific_date_df.copy() # this is to remove data from census that is missing from", "title_text = '<br><br>Covid-19 Total Cases Per 100k Population Per<br>County Using 2019 Census Estimations<br>'+date,", "# Per county geojson # with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: # counties = json.load(response)", "# row[\"Geographic Area\"].replace(row[\"state\"], '') # # break # # print(len(counties_list)) # # print((counties_list))", "color_continuous_scale=\"icefire\", # winner # color_continuous_scale=\"Viridis\", # color_continuous_scale=\"hot\", # color_continuous_scale=\"ice\", # color_continuous_scale=\"thermal\", color_continuous_scale=[[0.0,'rgb(0,0,200)'], [0.3,", "100k Population Per County<br>Using 2019 Census Estimations<br>'+date # ) # # fig.show() #", "df = df) fig = counties_heat_map(specific_date_df, new_date) # states_heat_map(specific_date_df): # fig.write_image(\"images_counties/\"+new_date+\"_county_per100k.png\") fig.write_image(\"C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/pages/static/current_counties.png\") html_header", "log10_per10k.append(math.log10(item)) specific_date_df[\"log10_per10k\"] = log10_per10k # import math log10_per100k = [] for item in", 
"print(list_state_count) print(len(list_state_count)) states_only_df[\"per_state_count\"] = list_state_count states_only_df[\"state_id\"] = list_str_states states_only_df[\"state_name\"] = fips_states_keys print(states_only_df) my_file", "county\" # my_file = os.path.join(THIS_FOLDER, 'population_counties_2019.xlsx') # pop_counties = pd.read_excel(open(my_file, 'rb'), index_col=None, sep='\\t')", "# log10_per10k = [] # for item in per10k: # # print(item) #", "fig = counties_heat_map(specific_date_df, new_date) # states_heat_map(specific_date_df): # fig.write_image(\"images_counties/\"+new_date+\"_county_per100k.png\") fig.write_image(\"C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/pages/static/current_counties.png\") html_header = \"\"\" {%", "'C:/Users/karas/.spyder-py3/coronavirus/images_counties' # for filename in os.listdir(directory): # # print(\"hi\") # f = Image.open('C:/Users/karas/.spyder-py3/coronavirus/images_counties/'+filename)", "color_continuous_scale=\"Viridis\", # range_color=(0, 10), # locationmode = 'USA-states', featureidkey = \"properties.STATE\", hover_name =", "= df[\"date\"][10] # 6/29/2020 #%% def line_prepender(filename, line): with open(filename, 'r+') as f:", "one_state == \"Distric of Columbia\": # # print(\"huzzah\") # if state == \"District", "print(specific_date_df) specific_date_df[\"state_name\"] = specific_date_df[\"state\"] for state, state_id in zip(fips_states_keys, fips_states_values): specific_date_df['state_name'] = specific_date_df['state_name'].replace(state_id,", "+= 1 if boo == True: population_counties_list.append(1) odd_balls.append(spec_fips) # unknown county cases #", "# per100k.append(100000 * (count/pop)) # specific_date_df[\"per100k\"] = per100k # # print(specific_date_df) # #", "a specific state, in this case Georgia # state = \"Georgia\" # index_counter", "pop_state = list(pop_counties[\"state\"]) # population_per_county = list(pop_counties[2019]) # population_counties_list = [] # #", "# 
color_continuous_scale=\"icefire\", # winner # color_continuous_scale=\"Viridis\", # color_continuous_scale=\"hot\", # color_continuous_scale=\"ice\", # color_continuous_scale=\"thermal\", color_continuous_scale=[[0.0,'rgb(0,0,200)'],", "counties_heat_map(specific_date_df, date): # \"for showing data per county\" # my_file = os.path.join(THIS_FOLDER, 'population_counties_2019.xlsx')", "in per100k: # # print(item) # log10_per100k.append(math.log10(item)) # specific_date_df[\"log10_per100k\"] = log10_per100k # copy_df", "copy_df[copy_df['log10_per100k'] != 0] # # Per county geojson # with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:", "if row[\"cases\"] > 0: IFR = row[\"deaths\"] / row[\"cases\"] IFR_list.append(IFR) else: IFR_list.append(0) specific_date_df[\"IFR\"]", "is to remove data from census that is missing from covid copy_df =", "fig.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0}) fig.show() plot(fig) return fig #%% def counties_heat_map(specific_date_df, date): \"for showing data per", "# counties = row[\"Geographic Area\"].replace(state, '') # if state == \"District of Columbia\":", "break print(list_state_count) print(len(list_state_count)) states_only_df[\"per_state_count\"] = list_state_count states_only_df[\"state_id\"] = list_str_states states_only_df[\"state_name\"] = fips_states_keys print(states_only_df)", "fips_states_keys = list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia') for w in", "font = {\"size\": 14, \"color\":\"White\"}, autosize = False, width = 800, height =", "specific_date_df[\"log10_per10k\"] = log10_per10k # import math log10_per100k = [] for item in per100k:", "urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: # counties = json.load(response) # # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # # 
print((counties[\"features\"][0]))", "from census that is missing from covid # copy_df = copy_df[copy_df['log10_per100k'] != 0]", "Area\"]: # states_col_for_county_pop.append(one_state) # # print(len(states_col_for_county_pop)) # # print(states_col_for_county_pop) # pop_counties[\"state\"] = states_col_for_county_pop", "# pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('.', '') # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(',',", "in df.iterrows(): if row[\"date\"] == input_date: specific_date_df.loc[df.index[index]] = df.iloc[index] # print(specific_date_df) # has", "print(states_only_df) pop_list = [] for state in states_only_df[\"state_name\"]: for i,row in pop_states.iterrows(): if", "state == \"District of Columbia\": # # print(\"trouble maker\") # # print(counties) #", "pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop == 1: # per100k.append(1) #", "{'cases_per_log10_per100k': 'log(cases/100k)'} ) fig.update_layout(margin={\"r\":5,\"t\":20,\"l\":5,\"b\":5}, title_text = '<br><br>Covid-19 Total Cases Per 100k Population Per<br>County", "Area\"].str.replace(',', '') # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('District of Columbia District of", "# counter += 1 if boo == True: population_counties_list.append(1) odd_balls.append(spec_fips) # unknown county", "per100k print(states_only_df) with open('gz_2010_us_040_00_20m.json') as response: states_mapping = json.load(response) print(states_mapping[\"features\"][0][\"properties\"][\"STATE\"]) print(len(states_mapping[\"features\"])) #3221 #", "per10k.append(1) # else: # per10k.append(10000 * (count/pop)) # specific_date_df[\"per10k\"] = per10k # #", "as response: fips_states = json.load(response) fips_states_keys = list(fips_states.keys()) fips_states_values = list(fips_states.values()) fips_states_keys =", "= specific_date_df[specific_date_df[\"state\"] 
!= \"Northern Mariana Islands\"] specific_date_df = specific_date_df[specific_date_df[\"state\"] != \"Virgin Islands\"] specific_date_df", "# # counter += 1 # if boo == True: # population_counties_list.append(1) #", "1 if boo == True: population_counties_list.append(1) odd_balls.append(spec_fips) # unknown county cases # print(spec_fips)", "for id_ in list_str_states: total = 0 for index, row in specific_date_df.iterrows(): if", "fig = px.choropleth(copy_df, geojson=counties, locations='fips', color='cases_per_log10_per100k', # color_continuous_scale=\"icefire\", # winner # color_continuous_scale=\"Viridis\", #", "print(len(fips_county_ids)) specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia spec_fips = list(specific_date_df[\"fips\"]) odd_balls = []", "# with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response: # counties = json.load(response) # # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) #", "# boo = True # for p_county, p_state in zip(pop_county, pop_state): # if", "pop == 1: # per100k.append(1) # else: # per100k.append(100000 * (count/pop)) # specific_date_df[\"per100k\"]", "# # import math # log10_per100k = [] # for item in per100k:", "else: county_id[n] = str(c_id) for n, s_id in enumerate(state_id): if s_id < 10:", "# print(index) # # print(copy_new_df.index[index]) # specific_date_Georgia_df.loc[index_counter] = specific_date_df.iloc[index] # index_counter += 1", "\"Distric of Columbia\": # # print(\"huzzah\") # if state == \"District of Columbia\":", "\"\"\" line_prepender('C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html', html_header) break #%% if __name__ == \"__main__\": main() # #%% #", "df, current_date def make_df_for_date(input_date, df): specific_date_df = pd.DataFrame(data=None, columns=list(df.columns.values)) for index, row in", "new_date = dfmain[\"date\"][dfmain.shape[0] - 1] # 
if yesterday = True # new_date =", "# if one_state == \"Distric of Columbia\": # # print(\"huzzah\") # if state", "Jun 29 15:54:28 2020 https://plotly.com/python/county-choropleth/?fbclid=IwAR1xOTSniBA_d1okZ-xEOa8eEeapK8AFTgWILshAnEvfLgJQPAhHgsVCIBE https://www.kaggle.com/fireballbyedimyrnmom/us-counties-covid-19-dataset better census data https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/ \"\"\" from urllib.request", "Area\"] == \"District of Columbia\": # # # print(\"sure\") #yes # # print(specific_date_df)", "in this case Georgia # state = \"Georgia\" # index_counter = 0 #", "specific_date_df[\"cases\"]): # if pop == 1: # per10k.append(1) # else: # per10k.append(10000 *", "10), # locationmode = 'USA-states', featureidkey = \"properties.STATE\", hover_name = \"state_name\", scope=\"usa\", labels={'per100k':'cases", "s_county == p_county and s_state == p_state: # population_counties_list.append(population_per_county[pop_county.index(p_county)]) # boo = False", "= 0 # for index, row in specific_date_df.iterrows(): # # print(index) # if", "# else: # per100k.append(100000 * (count/pop)) # specific_date_df[\"per100k\"] = per100k # # print(specific_date_df)", "# if row[\"Geographic Area\"].find(state) > 1: # one_state = state # # if", "zip(states_only_df[\"state_pop\"], states_only_df[\"per_state_count\"]): per100k.append(100000 * (count/pop)) states_only_df[\"per100k\"] = per100k print(states_only_df) with open('gz_2010_us_040_00_20m.json') as response:", "color_continuous_scale=\"Viridis\", # range_color=(0, 5), # # locationmode = 'USA-states', # featureidkey = \"id\",", "as pd import plotly.express as px import plotly from plotly.offline import plot import", "\"id\", # hover_name = \"county\", # scope=\"usa\", # ) # fig.update_layout(margin={\"r\":5,\"t\":5,\"l\":5,\"b\":5}, # title_text", "in row[\"Geographic Area\"]: # # # print(\"oh yeah\") # # row[\"Geographic Area\"].replace(row[\"state\"], '')", "pop_counties = 
pd.read_csv(open(my_file)) # print(pop_counties) county_id = list(pop_counties[\"COUNTY\"]) state_id = list(pop_counties[\"STATE\"]) population_per_county =", "= county_and_state return specific_date_df #%% def states_heat_map(specific_date_df): \"for showing data per state\" states_only_df", "zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop == 1: # per100k.append(1) # else: # per100k.append(100000", "# # print(\"yes\") # # print(index) # # print(copy_new_df.index[index]) # specific_date_Georgia_df.loc[index_counter] = specific_date_df.iloc[index]", "Area\"] = pop_counties[\"Geographic Area\"].str.replace(' County', '') # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace('", "# directory = 'C:/Users/karas/.spyder-py3/coronavirus/images_counties' # for filename in os.listdir(directory): # # print(\"hi\") #", "'') # if state == \"District of Columbia\": # # print(\"trouble maker\") #", "# images[0].save('covid_timeline_county_cases.gif', # save_all=True, append_images=images[1:], optimize=False, duration=500, loop=0) #%% #Graveyard # def counties_heat_map(specific_date_df,", "if yesterday: current_date = df[\"date\"][df.shape[0] - 1] # 6/29/2020 else: current_date = df[\"date\"][when]", "# one_state = \"District of Columbia\" # if one_state in row[\"Geographic Area\"]: #", "state fig = px.choropleth(states_only_df, geojson=states_mapping, locations='state_id', color='per100k', color_continuous_scale=\"Viridis\", # range_color=(0, 10), # locationmode", "Created on Mon Jun 29 15:54:28 2020 https://plotly.com/python/county-choropleth/?fbclid=IwAR1xOTSniBA_d1okZ-xEOa8eEeapK8AFTgWILshAnEvfLgJQPAhHgsVCIBE https://www.kaggle.com/fireballbyedimyrnmom/us-counties-covid-19-dataset better census data https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/", "1: # counties = row[\"Geographic Area\"].replace(state, '') # if state == \"District of", "= log10_per10k # import math 
log10_per100k = [] for item in per100k: #", "in range(dfmain.shape[0]): new_date = dfmain[\"date\"][i] if i%50 == 0 and new_date != old_date:", "= pd.read_csv(open(my_file)) # print(pop_counties) county_id = list(pop_counties[\"COUNTY\"]) state_id = list(pop_counties[\"STATE\"]) population_per_county = list(pop_counties[\"POPESTIMATE2019\"])", "that is missing from covid # copy_df = copy_df[copy_df['log10_per100k'] != 0] # #", "boo = True for fips_census in fips_county_ids: if spec_fips == fips_census: population_counties_list.append(population_per_county[fips_county_ids.index(fips_census)]) boo", "lakecolor='#4E5D6C'), font = {\"size\": 14, \"color\":\"White\"}, autosize = False, width = 800, height", "make_df_for_date(input_date, df): specific_date_df = pd.DataFrame(data=None, columns=list(df.columns.values)) for index, row in df.iterrows(): if row[\"date\"]", "# print(item) log10_per10k.append(math.log10(item)) specific_date_df[\"log10_per10k\"] = log10_per10k # import math log10_per100k = [] for", "df[\"date\"][df.shape[0] - 1] # 6/29/2020 else: current_date = df[\"date\"][when] return df, current_date def", "pop_counties[\"Geographic Area\"]: # # if \"Virginia\" in item: # # print(item) # #", "print(one_state) # one_state = \"District of Columbia\" # if one_state in row[\"Geographic Area\"]:", "# print(len(population_counties_list)) # 3065 # print(population_counties_list) specific_date_df[\"county_population\"] = population_counties_list per100k = [] for", "block content %} <body style=\"background-color:black;color:white;\"> \"\"\" line_prepender('C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html', html_header) break #%% if __name__ ==", "one_state = \"District of Columbia\" # if one_state in row[\"Geographic Area\"]: # states_col_for_county_pop.append(one_state)", "df = pd.read_csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv', dtype={\"fips\": str}) current_date = '' if 
yesterday: current_date = df[\"date\"][df.shape[0]", "copy_df = copy_df[copy_df['cases_per_log10_per100k'] != 0] # Per county geojson with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:", "= per100k print(states_only_df) with open('gz_2010_us_040_00_20m.json') as response: states_mapping = json.load(response) print(states_mapping[\"features\"][0][\"properties\"][\"STATE\"]) print(len(states_mapping[\"features\"])) #3221", "fips_states_values): # pop_counties[\"state\"] = pop_counties[\"state\"].replace(state, state_id) # specific_date_df[\"county\"] = specific_date_df[\"county\"].str.replace('.', '') # DistrictOfColumbia", "= [] # unknown county cases population_counties_list = [] # counter = 0", "# per county fig = px.choropleth(copy_df, geojson=counties, locations='fips', color='cases_per_log10_per100k', # color_continuous_scale=\"icefire\", # winner", "', '') # # print(pop_counties) # # for value in pop_counties[\"Geographic Area\"]: #", "print(counties) # counties = \"District of Columbia\" # counties_list.append(counties) # # for index,", "plot(fig,filename='covid_counties_'+date+'.html') plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/Current_counties.html') # plot(fig,filename='C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/templates/'+date+'_counties.html') # plot(fig) return fig #%% def main(): #[282519 rows", "# scope=\"usa\", # ) # fig.update_layout(margin={\"r\":5,\"t\":5,\"l\":5,\"b\":5}, # title_text = '<br><br>Covid-19 Spread Per 100k", "count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]): # if pop == 1: # per100k.append(1) # else:", "[] # for index, row in pop_counties.iterrows(): # one_state = '' # for", "os.mkdir(\"images_counties\") with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = 
json.load(response) fips_states_keys = list(fips_states.keys()) fips_states_values =", "is missing from covid # copy_df = copy_df[copy_df['log10_per100k'] != 0] # # Per", "'USA-states', # featureidkey = \"id\", # hover_name = \"county\", # scope=\"usa\", # )", "def states_heat_map(specific_date_df): \"for showing data per state\" states_only_df = pd.DataFrame() list_state_count = []", "County<br>Using 2019 Census Estimations<br>'+date # ) # # fig.show() # # plot(fig) #", "if s_county == p_county and s_state == p_state: # population_counties_list.append(population_per_county[pop_county.index(p_county)]) # boo =", "specific_date_df = specific_date_df.reset_index(drop=True) # specific_date_Georgia_df = pd.DataFrame(data=None, columns=list(specific_date_df.columns.values)) # print(specific_date_df) # print(specific_date_Georgia_df) #", "population_per_county = list(pop_counties[\"POPESTIMATE2019\"]) fips_county_ids = [] for n, c_id in enumerate(county_id): if c_id", "= population_counties_list # # print(specific_date_df) # per100k = [] # for pop, count", "https://www.kaggle.com/fireballbyedimyrnmom/us-counties-covid-19-dataset better census data https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/ \"\"\" from urllib.request import urlopen import json import", "images = [] # directory = 'C:/Users/karas/.spyder-py3/coronavirus/images_counties' # for filename in os.listdir(directory): #", "= fips_states_keys print(states_only_df) my_file = os.path.join(THIS_FOLDER, 'population_states_2019.txt') pop_states = pd.read_csv(my_file, header=0) # print(pop_states[\"State\"])", "county cases population_counties_list = [] # counter = 0 for spec_fips in spec_fips:", "https://www2.census.gov/programs-surveys/popest/datasets/2010-2019/counties/totals/ \"\"\" from urllib.request import urlopen import json import pandas as pd import", "json.load(response) # print(fips_states) fips_states_keys = list(fips_states.keys()) fips_states_values = 
list(fips_states.values()) fips_states_keys = [w.replace('ogia', 'orgia')", "Area\"].find(state) > 1: # one_state = state # # if one_state == \"Distric", "county fig = px.choropleth(copy_df, geojson=counties, locations='fips', color='cases_per_log10_per100k', # color_continuous_scale=\"icefire\", # winner # color_continuous_scale=\"Viridis\",", "# pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].str.replace(',', '') # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic", "counties = json.load(response) # print(counties[\"features\"][0][\"properties\"][\"STATE\"]) # print((counties[\"features\"][0])) #3221 # per county fig =", "# import math # log10_per10k = [] # for item in per10k: #", "= \"\"\" {% extends 'base.html' %} {% block content %} <body style=\"background-color:black;color:white;\"> \"\"\"", "+ content) f.write('\\n{% endblock %}') #%% def load_data(when = 0, yesterday=True): df =", "log10_per100k copy_df = specific_date_df.copy() # this is to remove data from census that", "counter += 1 if boo == True: population_counties_list.append(1) odd_balls.append(spec_fips) # unknown county cases", "counter += 1 # if boo == True: # population_counties_list.append(1) # # print(len(population_counties_list))", "specific_date_df['state_name'] = specific_date_df['state_name'].replace(state_id, state) county_and_state = [] for index, row in specific_date_df.iterrows(): c_and_s", "# for s_county, s_state in zip(spec_county, spec_state): # boo = True # for", "# my_file = os.path.join(THIS_FOLDER, 'population_counties_2019.xlsx') # pop_counties = pd.read_excel(open(my_file, 'rb'), index_col=None, sep='\\t') #", "Area\"] = pop_counties[\"Geographic Area\"].str.replace(' ', '') # DistrictOfColumbia # spec_county = list(specific_date_df[\"county\"]) #", "in value: # # print(value) # # for item in pop_counties[\"Geographic Area\"]: #", "# # if \"Virginia\" in item: # # print(item) # # print(pop_counties.shape) #", "# has all data for current 
date # 3067 x 6 # specific_date_df", "current_date, df = df) fig = counties_heat_map(specific_date_df, new_date) # states_heat_map(specific_date_df): # fig.write_image(\"images_counties/\"+new_date+\"_county_per100k.png\") fig.write_image(\"C:/Users/karas/.spyder-py3/Covid_Maps_Heroku/main_site_covid/pages/static/current_counties.png\")", "os.listdir(directory): # # print(\"hi\") # f = Image.open('C:/Users/karas/.spyder-py3/coronavirus/images_counties/'+filename) # # f = f.save(filename)", "def counties_heat_map(specific_date_df, date): \"for showing data per county\" my_file = os.path.join(THIS_FOLDER, 'all_census_data.csv') pop_counties", "# if state == \"District of Columbia\": # # print(\"trouble maker\") # #", "[0.5, 'rgb(234,252,258)'], [0.6, 'rgb(255,210,0)'], [1.0, 'rgb(200,0,0)']], range_color=(0, 5), # locationmode = 'USA-states', featureidkey", "PIL # import os # images = [] # directory = 'C:/Users/karas/.spyder-py3/coronavirus/images_counties' #", "== True: # population_counties_list.append(1) # # print(len(population_counties_list)) # 3065 # # print(population_counties_list) #", "# # if row[\"state\"] in row[\"Geographic Area\"]: # # # print(\"oh yeah\") #", "Area\"]) # # pop_counties[\"Geographic Area\"] = pop_counties[\"Geographic Area\"].map(lambda x: x.lstrip('. 
,').rstrip('aAbBcC')) # pop_counties[\"Geographic", "# title_text = '<br><br>Covid-19 Spread Per 100k Population Per County<br>Using 2019 Census Estimations<br>'+date", "# specific_date_df = specific_date_df.copy() IFR_list = [] for index, row in specific_date_df.iterrows(): if", "per100k # print(specific_date_df) # print(per100k) per10k = [] for pop, count in zip(specific_date_df[\"county_population\"],", "[] # directory = 'C:/Users/karas/.spyder-py3/coronavirus/images_counties' # for filename in os.listdir(directory): # # print(\"hi\")", "specific_date_df[\"log10_per10k\"] = log10_per10k # # import math # log10_per100k = [] # for", "range(dfmain.shape[0]): new_date = dfmain[\"date\"][i] if i%50 == 0 and new_date != old_date: old_date", "for spec_fips in spec_fips: boo = True for fips_census in fips_county_ids: if spec_fips", "row[\"state_name\"] county_and_state.append(c_and_s) specific_date_df[\"county_and_state\"] = county_and_state return specific_date_df #%% def states_heat_map(specific_date_df): \"for showing data", "= \"id\", hover_name = \"county_and_state\", hover_data = [\"county_population\", \"cases\", \"cases_per100k\", \"cases_per_log10_per100k\", \"deaths\", \"IFR\"],", "in item: # # print(item) # # print(pop_counties.shape) # states_col_for_county_pop = [] #", "one_state = state # # if one_state == \"Distric of Columbia\": # #", "# # print(pop_counties) # # for value in pop_counties[\"Geographic Area\"]: # # if", "# print(specific_date_df) # per100k = [] # for pop, count in zip(specific_date_df[\"county_population\"], specific_date_df[\"cases\"]):", "str(c_id) for n, s_id in enumerate(state_id): if s_id < 10: state_id[n] = \"0\"+str(s_id)", "specific_date_df['state'] = specific_date_df['state'].replace(state, state_id) # print(specific_date_df) specific_date_df[\"state_name\"] = specific_date_df[\"state\"] for state, state_id in", "[] list_str_states = list(specific_date_df[\"state\"].unique()) # print(list_str_states) for id_ in list_str_states: 
total = 0", "# # print(counties) # counties = \"District of Columbia\" # counties_list.append(counties) # #", "= pop_counties[\"Geographic Area\"].str.replace('District of Columbia District of Columbia', 'District of Columbia') # pop_counties[\"Geographic", "print(pop_counties) # counties_list = [] # for index, row in pop_counties.iterrows(): # for", "print(len(images)) # images[0].save('covid_timeline_county_cases.gif', # save_all=True, append_images=images[1:], optimize=False, duration=500, loop=0) #%% #Graveyard # def", "with urlopen('https://gist.githubusercontent.com/wavded/1250983/raw/bf7c1c08f7b1596ca10822baeb8049d7350b0a4b/stateToFips.json') as response: fips_states = json.load(response) fips_states_keys = list(fips_states.keys()) fips_states_values = list(fips_states.values())", "current_date = df[\"date\"][df.shape[0] - 1] # 6/29/2020 else: current_date = df[\"date\"][when] return df," ]
[ "5 single = tree.tree_single(merch_dib, self.plog, self.mlog) tree_per_acre = tree.tree_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) log_per_acre", "ranges in self.LOG_RANGE_LIST: if Log_Dict[key]['L_LGT'] in ranges[1]: rng = ranges[0] temp_list = [Log_Dict[key]['L_GRD'][0],", "master[spp]['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots) master['TOTALS'][grade][rng][j - 2] += (data[i][j] /", "range(21, 31)], [\"11-20 ft\", range(11, 21)], [\"1-10 ft\", range(1, 11)]] def __init__(self, CSV,", "key in master: for grade in Timber.GRADE_NAMES: master[key][grade] = {} for rng in", "for key in master: for grade in Timber.GRADE_NAMES: master[key][grade] = {} for rng", "0, 0]]) ## SUMMARY STATISTICS self.conditions_dict = self.get_conditions_dict() self.logs_dict = self.get_logs_dict() return def", "elif str(line[0]).upper() != self.stand: next else: SPP = str(line[3]).upper() if SPP not in", "count == 0: ax_list.append((key, grade)) for ax in ax_list: del master[ax[0]][ax[1]] return master", "data[1][i - 1] master[\"TOTALS\"] = totals_temp for key in master: sums = [1,", "{} totals_temp = [0, 0, 0, 0, 0, 0, 0, 0, 0] for", "spp in self.species_list: master[spp] = {} master['TOTALS'] = {} # Formatting Grades and", "= tree.tree_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) log_per_acre = tree.log_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) self.summary_conditions.append([single['SPP'][0], [tree_per_acre['TPA'],", "= [0, 0, 0] master[key]['TTL'] = {} for rng in log_rng: master[key]['TTL'][rng] =", "Timber.GRADE_NAMES: master[key][grade] = {} for rng in log_rng: master[key][grade][rng] = [0, 0, 0]", "in self.summary_logs: spp = data[0] for i in range(1, len(data)): grade, rng =", "in SPECIES_LIST: SPECIES_LIST.append(SPP) if line[5] != \"\": DBH = float(line[4]) HEIGHT = float(line[5])", "2, 3, 7, 8] for i in range(1, len(master[key])): if i in sums:", "rng in log_rng: master[key]['TTL'][rng] = [0, 0, 0] # Adding data to Master", "Species and Totals for 
key in master: for grade in Timber.GRADE_NAMES: master[key][grade] =", "any Grades that have zero data ax_list = [] for key in master:", "/ self.plots) master[spp]['TTL'][rng][j - 2] += (data[i][j] / self.plots) master[spp]['TTL']['TGRD'][j - 2] +=", "= {} # Formatting Species into main keys for spp in self.species_list: master[spp]", "= CSV self.stand = Stand_to_Examine.upper() self.plots = Plots self.plog = Pref_Log self.mlog =", "SPECIES_LIST.append(SPP) if line[5] != \"\": DBH = float(line[4]) HEIGHT = float(line[5]) HDR_LIST.append(HEIGHT /", "i in sums: master[key][i] = master[key][i] / self.plots else: master[key][i] = master[key][i] /", "for grade in master[key]: count = 0 for rng in master[key][grade]: count +=", "master.append(temp_list) return master def get_conditions_dict(self): # ORDER OF INITAL SPP LIST - [0SPPCOUNT,", "self.LOG_RANGE_LIST: if Log_Dict[key]['L_LGT'] in ranges[1]: rng = ranges[0] temp_list = [Log_Dict[key]['L_GRD'][0], rng, Log_Dict[key]['L_CT_AC'],", "tree_per_acre = tree.tree_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) log_per_acre = tree.log_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) self.summary_conditions.append([single['SPP'][0],", "csv.reader(tree_data) next(tree_data_reader) for line in tree_data_reader: if line[0] == \"\": break elif str(line[0]).upper()", "0] for data in self.summary_conditions: spp = data[0] master[spp][0] += 1 totals_temp[0] +=", "0, 0] # Adding data to Master Dict for data in self.summary_logs: spp", "AVG_HDR = round(sum(HDR_LIST) / len(HDR_LIST), 2) return AVG_HDR, SPECIES_LIST def get_log_list(self, Species, Log_Dict):", "len(data[1]) + 1): master[spp][i] += data[1][i - 1] totals_temp[i] += data[1][i - 1]", "ranges[0] temp_list = [Log_Dict[key]['L_GRD'][0], rng, Log_Dict[key]['L_CT_AC'], Log_Dict[key]['L_BF_AC'], Log_Dict[key]['L_CF_AC']] master.append(temp_list) return master def get_conditions_dict(self):", "Stand_to_Examine.upper() self.plots = Plots self.plog = Pref_Log self.mlog = 
Min_Log self.species_list = []", "self.summary_logs.append(self.get_log_list(single['SPP'][0], log_per_acre)) else: tree = Timber(SPECIES, DBH, HEIGHT) self.summary_conditions.append([tree.SPP, [tree.get_TPA(PLOT_FACTOR), tree.get_BA_acre(PLOT_FACTOR), tree.get_RD_acre(PLOT_FACTOR), tree.HGT,", "in sums: master[key][i] = master[key][i] / self.plots else: master[key][i] = master[key][i] / master[key][0]", "master[key][grade][rng][0] if count == 0: ax_list.append((key, grade)) for ax in ax_list: del master[ax[0]][ax[1]]", "7BF_AC, 8CF_AC] # After Pop SPPCOUNT and Add QMD to 2 index #", "= [[\"40+ ft\", range(41, 121)], [\"31-40 ft\", range(31, 41)], [\"21-30 ft\", range(21, 31)],", "REPORT DATA MODULE class Report(object): LOG_RANGE_LIST = [[\"40+ ft\", range(41, 121)], [\"31-40 ft\",", "master['TOTALS'] = {} # Formatting Grades and Ranges in correct order, as nested", "Log_Dict[key]['L_CT_AC'], Log_Dict[key]['L_BF_AC'], Log_Dict[key]['L_CF_AC']] master.append(temp_list) return master def get_conditions_dict(self): # ORDER OF INITAL SPP", "121)], [\"31-40 ft\", range(31, 41)], [\"21-30 ft\", range(21, 31)], [\"11-20 ft\", range(11, 21)],", "+= (data[i][j] / self.plots) master['TOTALS'][grade]['TGRD'][j - 2] += (data[i][j] / self.plots) master['TOTALS']['TTL'][rng][j -", "else: tree = Timber(SPECIES, DBH, HEIGHT) self.summary_conditions.append([tree.SPP, [tree.get_TPA(PLOT_FACTOR), tree.get_BA_acre(PLOT_FACTOR), tree.get_RD_acre(PLOT_FACTOR), tree.HGT, tree.HDR, 0,", "= csv.reader(tree_data) next(tree_data_reader) for line in tree_data_reader: if line[0] == \"\": break elif", "8CF_AC] master = {} totals_temp = [0, 0, 0, 0, 0, 0, 0,", "in master: for grade in Timber.GRADE_NAMES: master[key][grade] = {} for rng in log_rng:", "- 2] += (data[i][j] / self.plots) master['TOTALS']['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots)", "+= (data[i][j] / self.plots) master[spp]['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots) master['TOTALS'][grade][rng][j -", "if line[0] == 
\"\": break elif str(line[0]).upper() != self.stand: next else: SPP =", "range(2, len(data[i])): master[spp][grade][rng][j - 2] += (data[i][j] / self.plots) master[spp][grade]['TGRD'][j - 2] +=", "21)], [\"1-10 ft\", range(1, 11)]] def __init__(self, CSV, Stand_to_Examine, Plots, Pref_Log, Min_Log): self.csv", "have zero data ax_list = [] for key in master: for grade in", "str(line[0]).upper() != self.stand: next else: SPP = str(line[3]).upper() if SPP not in SPECIES_LIST:", "2] += (data[i][j] / self.plots) master[spp][grade]['TGRD'][j - 2] += (data[i][j] / self.plots) master[spp]['TTL'][rng][j", "to Master Dict for data in self.summary_logs: spp = data[0] for i in", "5HDR, 6VBAR, 7BF_AC, 8CF_AC] # After Pop SPPCOUNT and Add QMD to 2", "2 index # ORDER OF FINAL SPP LIST - [0TPA, 1BA_AC, 2QMD, 3RD_AC,", "float(line[4]) HEIGHT = float(line[5]) HDR_LIST.append(HEIGHT / (DBH / 12)) AVG_HDR = round(sum(HDR_LIST) /", "dicts of Species and Totals for key in master: for grade in Timber.GRADE_NAMES:", "- 2] += (data[i][j] / self.plots) master['TOTALS'][grade]['TGRD'][j - 2] += (data[i][j] / self.plots)", "SPECIES_LIST def get_log_list(self, Species, Log_Dict): master = [Species] for key in Log_Dict: rng", "class Report(object): LOG_RANGE_LIST = [[\"40+ ft\", range(41, 121)], [\"31-40 ft\", range(31, 41)], [\"21-30", "1): master[spp][i] += data[1][i - 1] totals_temp[i] += data[1][i - 1] master[\"TOTALS\"] =", "def __init__(self, CSV, Stand_to_Examine, Plots, Pref_Log, Min_Log): self.csv = CSV self.stand = Stand_to_Examine.upper()", "single['T_HGT'], single['HDR'], single['VBAR'], tree_per_acre['BF_AC'], tree_per_acre['CF_AC']]]) self.summary_logs.append(self.get_log_list(single['SPP'][0], log_per_acre)) else: tree = Timber(SPECIES, DBH, HEIGHT)", "+= (data[i][j] / self.plots) master['TOTALS'][grade][rng][j - 2] += (data[i][j] / self.plots) master['TOTALS'][grade]['TGRD'][j -", "= int(float(line[5])) PLOT_FACTOR = float(line[6]) if DBH >= 6.0: tree = Timber(SPECIES, DBH,", 
"6VBAR, 7BF_AC, 8CF_AC] master = {} totals_temp = [0, 0, 0, 0, 0,", "self.plots) # Removing any Grades that have zero data ax_list = [] for", "\"\": break elif str(line[0]).upper() != self.stand: next else: SPP = str(line[3]).upper() if SPP", "HEIGHT) merch_dib = tree.merch_dib() if merch_dib < 5: merch_dib = 5 single =", "Log_Dict[key]['L_LGT'] in ranges[1]: rng = ranges[0] temp_list = [Log_Dict[key]['L_GRD'][0], rng, Log_Dict[key]['L_CT_AC'], Log_Dict[key]['L_BF_AC'], Log_Dict[key]['L_CF_AC']]", "Species, Log_Dict): master = [Species] for key in Log_Dict: rng = \"\" for", "4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] master = {} totals_temp = [0, 0, 0,", "= self.get_conditions_dict() self.logs_dict = self.get_logs_dict() return def get_HDR_Species(self): HDR_LIST = [] SPECIES_LIST =", "master[key][0]) / .005454)) return master def get_logs_dict(self): log_rng = [\"40+ ft\", \"31-40 ft\",", "OF INITAL SPP LIST - [0SPPCOUNT, 1TPA, 2BA_AC, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC,", "data[0] for i in range(1, len(data)): grade, rng = data[i][0], data[i][1] for j", "DBH, HEIGHT) merch_dib = tree.merch_dib() if merch_dib < 5: merch_dib = 5 single", "CSV self.stand = Stand_to_Examine.upper() self.plots = Plots self.plog = Pref_Log self.mlog = Min_Log", "next else: SPECIES = str(line[3]).upper() DBH = float(line[4]) if line[5] == \"\": HEIGHT", "0, 0, 0, 0, 0, 0] for data in self.summary_conditions: spp = data[0]", "rng in master[key][grade]: count += master[key][grade][rng][0] if count == 0: ax_list.append((key, grade)) for", "csv import math ##### REPORT DATA MODULE class Report(object): LOG_RANGE_LIST = [[\"40+ ft\",", "[tree_per_acre['TPA'], tree_per_acre['BA_AC'], tree_per_acre['RD_AC'], single['T_HGT'], single['HDR'], single['VBAR'], tree_per_acre['BF_AC'], tree_per_acre['CF_AC']]]) self.summary_logs.append(self.get_log_list(single['SPP'][0], log_per_acre)) else: tree =", "return master def get_logs_dict(self): log_rng = [\"40+ ft\", \"31-40 ft\", \"21-30 ft\", \"11-20", "12)) 
AVG_HDR = round(sum(HDR_LIST) / len(HDR_LIST), 2) return AVG_HDR, SPECIES_LIST def get_log_list(self, Species,", "master['TOTALS'][grade][rng][j - 2] += (data[i][j] / self.plots) master['TOTALS'][grade]['TGRD'][j - 2] += (data[i][j] /", "master: sums = [1, 2, 3, 7, 8] for i in range(1, len(master[key])):", "get_HDR_Species(self): HDR_LIST = [] SPECIES_LIST = [] with open(self.csv, 'r') as tree_data: tree_data_reader", "(data[i][j] / self.plots) master[spp]['TTL'][rng][j - 2] += (data[i][j] / self.plots) master[spp]['TTL']['TGRD'][j - 2]", "in self.species_list: master[spp] = {} master['TOTALS'] = {} # Formatting Grades and Ranges", "= {} # Formatting Grades and Ranges in correct order, as nested dicts", "for spp in self.species_list: master[spp] = [0, 0, 0, 0, 0, 0, 0,", "merch_dib < 5: merch_dib = 5 single = tree.tree_single(merch_dib, self.plog, self.mlog) tree_per_acre =", "\"11-20 ft\", \"1-10 ft\", 'TGRD'] master = {} # Formatting Species into main", "- 2] += (data[i][j] / self.plots) master[spp][grade]['TGRD'][j - 2] += (data[i][j] / self.plots)", "master[key]: count = 0 for rng in master[key][grade]: count += master[key][grade][rng][0] if count", "in self.species_list: master[spp] = [0, 0, 0, 0, 0, 0, 0, 0, 0]", "6VBAR, 7BF_AC, 8CF_AC] # After Pop SPPCOUNT and Add QMD to 2 index", "ft\", \"21-30 ft\", \"11-20 ft\", \"1-10 ft\", 'TGRD'] master = {} # Formatting", "as nested dicts of Species and Totals for key in master: for grade", "self.csv = CSV self.stand = Stand_to_Examine.upper() self.plots = Plots self.plog = Pref_Log self.mlog", "ORDER OF INITAL SPP LIST - [0SPPCOUNT, 1TPA, 2BA_AC, 3RD_AC, 4T_HGT, 5HDR, 6VBAR,", "tree.get_BA_acre(PLOT_FACTOR), tree.get_RD_acre(PLOT_FACTOR), tree.HGT, tree.HDR, 0, 0, 0]]) ## SUMMARY STATISTICS self.conditions_dict = self.get_conditions_dict()", "[] SPECIES_LIST = [] with open(self.csv, 'r') as tree_data: tree_data_reader = csv.reader(tree_data) next(tree_data_reader)", "log_rng = [\"40+ ft\", \"31-40 ft\", \"21-30 ft\", 
\"11-20 ft\", \"1-10 ft\", 'TGRD']", "self.stand: next else: SPECIES = str(line[3]).upper() DBH = float(line[4]) if line[5] == \"\":", "= {} for rng in log_rng: master[key]['TTL'][rng] = [0, 0, 0] # Adding", "/ .005454)) return master def get_logs_dict(self): log_rng = [\"40+ ft\", \"31-40 ft\", \"21-30", "single = tree.tree_single(merch_dib, self.plog, self.mlog) tree_per_acre = tree.tree_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) log_per_acre =", "return def get_HDR_Species(self): HDR_LIST = [] SPECIES_LIST = [] with open(self.csv, 'r') as", "SPECIES_LIST = [] with open(self.csv, 'r') as tree_data: tree_data_reader = csv.reader(tree_data) next(tree_data_reader) for", "break elif str(line[0]).upper() != self.stand: next else: SPP = str(line[3]).upper() if SPP not", "{} # Formatting Species into main keys for spp in self.species_list: master[spp] =", "if SPP not in SPECIES_LIST: SPECIES_LIST.append(SPP) if line[5] != \"\": DBH = float(line[4])", "line in tree_data_reader: if line[0] == \"\": break elif str(line[0]).upper() != self.stand: next", "[\"1-10 ft\", range(1, 11)]] def __init__(self, CSV, Stand_to_Examine, Plots, Pref_Log, Min_Log): self.csv =", "in ranges[1]: rng = ranges[0] temp_list = [Log_Dict[key]['L_GRD'][0], rng, Log_Dict[key]['L_CT_AC'], Log_Dict[key]['L_BF_AC'], Log_Dict[key]['L_CF_AC']] master.append(temp_list)", "in log_rng: master[key][grade][rng] = [0, 0, 0] master[key]['TTL'] = {} for rng in", "rng = ranges[0] temp_list = [Log_Dict[key]['L_GRD'][0], rng, Log_Dict[key]['L_CT_AC'], Log_Dict[key]['L_BF_AC'], Log_Dict[key]['L_CF_AC']] master.append(temp_list) return master", "tree = Timber(SPECIES, DBH, HEIGHT) self.summary_conditions.append([tree.SPP, [tree.get_TPA(PLOT_FACTOR), tree.get_BA_acre(PLOT_FACTOR), tree.get_RD_acre(PLOT_FACTOR), tree.HGT, tree.HDR, 0, 0,", "import Timber import csv import math ##### REPORT DATA MODULE class Report(object): LOG_RANGE_LIST", "== \"\": HEIGHT = int(round(AVG_HDR * (DBH/12),0)) else: HEIGHT = 
int(float(line[5])) PLOT_FACTOR =", "sums = [1, 2, 3, 7, 8] for i in range(1, len(master[key])): if", "Log_Dict: rng = \"\" for ranges in self.LOG_RANGE_LIST: if Log_Dict[key]['L_LGT'] in ranges[1]: rng", "= Min_Log self.species_list = [] self.summary_conditions = [] self.summary_logs = [] self.conditions_dict =", "tree.log_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) self.summary_conditions.append([single['SPP'][0], [tree_per_acre['TPA'], tree_per_acre['BA_AC'], tree_per_acre['RD_AC'], single['T_HGT'], single['HDR'], single['VBAR'], tree_per_acre['BF_AC'], tree_per_acre['CF_AC']]])", "tree.merch_dib() if merch_dib < 5: merch_dib = 5 single = tree.tree_single(merch_dib, self.plog, self.mlog)", "if count == 0: ax_list.append((key, grade)) for ax in ax_list: del master[ax[0]][ax[1]] return", "for j in range(2, len(data[i])): master[spp][grade][rng][j - 2] += (data[i][j] / self.plots) master[spp][grade]['TGRD'][j", "INITIALIZATION with open(self.csv, 'r') as tree_data: tree_data_reader = csv.reader(tree_data) next(tree_data_reader) for line in", "7, 8] for i in range(1, len(master[key])): if i in sums: master[key][i] =", "SPP = str(line[3]).upper() if SPP not in SPECIES_LIST: SPECIES_LIST.append(SPP) if line[5] != \"\":", "for data in self.summary_logs: spp = data[0] for i in range(1, len(data)): grade,", "= int(round(AVG_HDR * (DBH/12),0)) else: HEIGHT = int(float(line[5])) PLOT_FACTOR = float(line[6]) if DBH", "grade in Timber.GRADE_NAMES: master[key][grade] = {} for rng in log_rng: master[key][grade][rng] = [0,", "str(line[0]).upper() != self.stand: next else: SPECIES = str(line[3]).upper() DBH = float(line[4]) if line[5]", "range(1, len(data)): grade, rng = data[i][0], data[i][1] for j in range(2, len(data[i])): master[spp][grade][rng][j", "+= 1 totals_temp[0] += 1 for i in range(1, len(data[1]) + 1): master[spp][i]", "in range(1, len(master[key])): if i in sums: master[key][i] = master[key][i] / self.plots else:", "ft\", \"1-10 ft\", 'TGRD'] master = {} # 
Formatting Species into main keys", "tree_per_acre['CF_AC']]]) self.summary_logs.append(self.get_log_list(single['SPP'][0], log_per_acre)) else: tree = Timber(SPECIES, DBH, HEIGHT) self.summary_conditions.append([tree.SPP, [tree.get_TPA(PLOT_FACTOR), tree.get_BA_acre(PLOT_FACTOR), tree.get_RD_acre(PLOT_FACTOR),", "= \"\" for ranges in self.LOG_RANGE_LIST: if Log_Dict[key]['L_LGT'] in ranges[1]: rng = ranges[0]", "self.plots) master['TOTALS']['TTL'][rng][j - 2] += (data[i][j] / self.plots) master['TOTALS']['TTL']['TGRD'][j - 2] += (data[i][j]", "= {} totals_temp = [0, 0, 0, 0, 0, 0, 0, 0, 0]", "self.get_conditions_dict() self.logs_dict = self.get_logs_dict() return def get_HDR_Species(self): HDR_LIST = [] SPECIES_LIST = []", "\"\": DBH = float(line[4]) HEIGHT = float(line[5]) HDR_LIST.append(HEIGHT / (DBH / 12)) AVG_HDR", "\"1-10 ft\", 'TGRD'] master = {} # Formatting Species into main keys for", "else: HEIGHT = int(float(line[5])) PLOT_FACTOR = float(line[6]) if DBH >= 6.0: tree =", "self.plots) master[spp][grade]['TGRD'][j - 2] += (data[i][j] / self.plots) master[spp]['TTL'][rng][j - 2] += (data[i][j]", "log_rng: master[key]['TTL'][rng] = [0, 0, 0] # Adding data to Master Dict for", "TREE (TIMBER CLASS) INITIALIZATION with open(self.csv, 'r') as tree_data: tree_data_reader = csv.reader(tree_data) next(tree_data_reader)", "in master: sums = [1, 2, 3, 7, 8] for i in range(1,", "rng, Log_Dict[key]['L_CT_AC'], Log_Dict[key]['L_BF_AC'], Log_Dict[key]['L_CF_AC']] master.append(temp_list) return master def get_conditions_dict(self): # ORDER OF INITAL", "def get_HDR_Species(self): HDR_LIST = [] SPECIES_LIST = [] with open(self.csv, 'r') as tree_data:", "'TGRD'] master = {} # Formatting Species into main keys for spp in", "+ 1): master[spp][i] += data[1][i - 1] totals_temp[i] += data[1][i - 1] master[\"TOTALS\"]", "master[key][grade]: count += master[key][grade][rng][0] if count == 0: ax_list.append((key, grade)) for ax in", "Pop SPPCOUNT and Add QMD to 2 index # ORDER OF 
FINAL SPP", "# ORDER OF FINAL SPP LIST - [0TPA, 1BA_AC, 2QMD, 3RD_AC, 4T_HGT, 5HDR,", "\"\": break elif str(line[0]).upper() != self.stand: next else: SPECIES = str(line[3]).upper() DBH =", "= data[i][0], data[i][1] for j in range(2, len(data[i])): master[spp][grade][rng][j - 2] += (data[i][j]", "self.mlog, PLOT_FACTOR) self.summary_conditions.append([single['SPP'][0], [tree_per_acre['TPA'], tree_per_acre['BA_AC'], tree_per_acre['RD_AC'], single['T_HGT'], single['HDR'], single['VBAR'], tree_per_acre['BF_AC'], tree_per_acre['CF_AC']]]) self.summary_logs.append(self.get_log_list(single['SPP'][0], log_per_acre))", "for rng in log_rng: master[key]['TTL'][rng] = [0, 0, 0] # Adding data to", "(data[i][j] / self.plots) master[spp][grade]['TGRD'][j - 2] += (data[i][j] / self.plots) master[spp]['TTL'][rng][j - 2]", "PLOT_FACTOR = float(line[6]) if DBH >= 6.0: tree = Timber(SPECIES, DBH, HEIGHT) merch_dib", "= [] self.conditions_dict = {} self.logs_dict = {} self.report() def report(self): AVG_HDR, self.species_list", "/ self.plots) master['TOTALS']['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots) # Removing any Grades", "[] self.summary_conditions = [] self.summary_logs = [] self.conditions_dict = {} self.logs_dict = {}", "float(line[4]) if line[5] == \"\": HEIGHT = int(round(AVG_HDR * (DBH/12),0)) else: HEIGHT =", "= data[0] master[spp][0] += 1 totals_temp[0] += 1 for i in range(1, len(data[1])", "DBH >= 6.0: tree = Timber(SPECIES, DBH, HEIGHT) merch_dib = tree.merch_dib() if merch_dib", "self.mlog, PLOT_FACTOR) log_per_acre = tree.log_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) self.summary_conditions.append([single['SPP'][0], [tree_per_acre['TPA'], tree_per_acre['BA_AC'], tree_per_acre['RD_AC'], single['T_HGT'],", "0 for rng in master[key][grade]: count += master[key][grade][rng][0] if count == 0: ax_list.append((key,", "DBH = float(line[4]) if line[5] == \"\": HEIGHT = int(round(AVG_HDR * (DBH/12),0)) else:", "and Ranges in correct order, as nested dicts of 
Species and Totals for", "= tree.merch_dib() if merch_dib < 5: merch_dib = 5 single = tree.tree_single(merch_dib, self.plog,", "6.0: tree = Timber(SPECIES, DBH, HEIGHT) merch_dib = tree.merch_dib() if merch_dib < 5:", "get_logs_dict(self): log_rng = [\"40+ ft\", \"31-40 ft\", \"21-30 ft\", \"11-20 ft\", \"1-10 ft\",", "as tree_data: tree_data_reader = csv.reader(tree_data) next(tree_data_reader) for line in tree_data_reader: if line[0] ==", "index # ORDER OF FINAL SPP LIST - [0TPA, 1BA_AC, 2QMD, 3RD_AC, 4T_HGT,", "HEIGHT = int(float(line[5])) PLOT_FACTOR = float(line[6]) if DBH >= 6.0: tree = Timber(SPECIES,", "# Removing any Grades that have zero data ax_list = [] for key", "self.summary_conditions.append([tree.SPP, [tree.get_TPA(PLOT_FACTOR), tree.get_BA_acre(PLOT_FACTOR), tree.get_RD_acre(PLOT_FACTOR), tree.HGT, tree.HDR, 0, 0, 0]]) ## SUMMARY STATISTICS self.conditions_dict", "master['TOTALS'][grade]['TGRD'][j - 2] += (data[i][j] / self.plots) master['TOTALS']['TTL'][rng][j - 2] += (data[i][j] /", "8] for i in range(1, len(master[key])): if i in sums: master[key][i] = master[key][i]", "= Pref_Log self.mlog = Min_Log self.species_list = [] self.summary_conditions = [] self.summary_logs =", "float(line[5]) HDR_LIST.append(HEIGHT / (DBH / 12)) AVG_HDR = round(sum(HDR_LIST) / len(HDR_LIST), 2) return", "[0, 0, 0, 0, 0, 0, 0, 0, 0] for spp in self.species_list:", "= {} self.logs_dict = {} self.report() def report(self): AVG_HDR, self.species_list = self.get_HDR_Species() ##", "= master[key][i] / master[key][0] master[key].pop(0) master[key].insert(2, math.sqrt((master[key][1] / master[key][0]) / .005454)) return master", "master['TOTALS']['TTL'][rng][j - 2] += (data[i][j] / self.plots) master['TOTALS']['TTL']['TGRD'][j - 2] += (data[i][j] /", "grade, rng = data[i][0], data[i][1] for j in range(2, len(data[i])): master[spp][grade][rng][j - 2]", "master[key][0] master[key].pop(0) master[key].insert(2, math.sqrt((master[key][1] / master[key][0]) / .005454)) return 
master def get_logs_dict(self): log_rng", "next(tree_data_reader) for line in tree_data_reader: if line[0] == \"\": break elif str(line[0]).upper() !=", "range(1, 11)]] def __init__(self, CSV, Stand_to_Examine, Plots, Pref_Log, Min_Log): self.csv = CSV self.stand", "HDR_LIST = [] SPECIES_LIST = [] with open(self.csv, 'r') as tree_data: tree_data_reader =", "/ master[key][0] master[key].pop(0) master[key].insert(2, math.sqrt((master[key][1] / master[key][0]) / .005454)) return master def get_logs_dict(self):", "- 1] master[\"TOTALS\"] = totals_temp for key in master: sums = [1, 2,", "[0SPPCOUNT, 1TPA, 2BA_AC, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] # After Pop SPPCOUNT", "(TIMBER CLASS) INITIALIZATION with open(self.csv, 'r') as tree_data: tree_data_reader = csv.reader(tree_data) next(tree_data_reader) for", "for line in tree_data_reader: if line[0] == \"\": break elif str(line[0]).upper() != self.stand:", "line[0] == \"\": break elif str(line[0]).upper() != self.stand: next else: SPECIES = str(line[3]).upper()", "tree.tree_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) log_per_acre = tree.log_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) self.summary_conditions.append([single['SPP'][0], [tree_per_acre['TPA'], tree_per_acre['BA_AC'],", "= [\"40+ ft\", \"31-40 ft\", \"21-30 ft\", \"11-20 ft\", \"1-10 ft\", 'TGRD'] master", "for key in Log_Dict: rng = \"\" for ranges in self.LOG_RANGE_LIST: if Log_Dict[key]['L_LGT']", "key in Log_Dict: rng = \"\" for ranges in self.LOG_RANGE_LIST: if Log_Dict[key]['L_LGT'] in", "self.species_list: master[spp] = [0, 0, 0, 0, 0, 0, 0, 0, 0] for", "range(31, 41)], [\"21-30 ft\", range(21, 31)], [\"11-20 ft\", range(11, 21)], [\"1-10 ft\", range(1,", "self.summary_conditions.append([single['SPP'][0], [tree_per_acre['TPA'], tree_per_acre['BA_AC'], tree_per_acre['RD_AC'], single['T_HGT'], single['HDR'], single['VBAR'], tree_per_acre['BF_AC'], tree_per_acre['CF_AC']]]) 
self.summary_logs.append(self.get_log_list(single['SPP'][0], log_per_acre)) else: tree", "= self.get_HDR_Species() ## MAIN READ AND TREE (TIMBER CLASS) INITIALIZATION with open(self.csv, 'r')", "2BA_AC, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] # After Pop SPPCOUNT and Add", "to 2 index # ORDER OF FINAL SPP LIST - [0TPA, 1BA_AC, 2QMD,", "0, 0, 0, 0, 0] for data in self.summary_conditions: spp = data[0] master[spp][0]", "if line[0] == \"\": break elif str(line[0]).upper() != self.stand: next else: SPECIES =", "master[key]['TTL'] = {} for rng in log_rng: master[key]['TTL'][rng] = [0, 0, 0] #", "break elif str(line[0]).upper() != self.stand: next else: SPECIES = str(line[3]).upper() DBH = float(line[4])", "self.get_logs_dict() return def get_HDR_Species(self): HDR_LIST = [] SPECIES_LIST = [] with open(self.csv, 'r')", "key in master: for grade in master[key]: count = 0 for rng in", "ft\", range(31, 41)], [\"21-30 ft\", range(21, 31)], [\"11-20 ft\", range(11, 21)], [\"1-10 ft\",", "ORDER OF FINAL SPP LIST - [0TPA, 1BA_AC, 2QMD, 3RD_AC, 4T_HGT, 5HDR, 6VBAR,", "- [0SPPCOUNT, 1TPA, 2BA_AC, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] # After Pop", "ranges[1]: rng = ranges[0] temp_list = [Log_Dict[key]['L_GRD'][0], rng, Log_Dict[key]['L_CT_AC'], Log_Dict[key]['L_BF_AC'], Log_Dict[key]['L_CF_AC']] master.append(temp_list) return", "data to Master Dict for data in self.summary_logs: spp = data[0] for i", "self.summary_logs: spp = data[0] for i in range(1, len(data)): grade, rng = data[i][0],", "= data[0] for i in range(1, len(data)): grade, rng = data[i][0], data[i][1] for", "self.plots) master['TOTALS']['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots) # Removing any Grades that", "0, 0, 0]]) ## SUMMARY STATISTICS self.conditions_dict = self.get_conditions_dict() self.logs_dict = self.get_logs_dict() return", "LOG_RANGE_LIST = [[\"40+ ft\", range(41, 121)], [\"31-40 ft\", range(31, 41)], [\"21-30 ft\", range(21,", "in self.LOG_RANGE_LIST: if Log_Dict[key]['L_LGT'] in 
ranges[1]: rng = ranges[0] temp_list = [Log_Dict[key]['L_GRD'][0], rng,", "0, 0, 0, 0] for data in self.summary_conditions: spp = data[0] master[spp][0] +=", "ft\", \"11-20 ft\", \"1-10 ft\", 'TGRD'] master = {} # Formatting Species into", "{} for rng in log_rng: master[key]['TTL'][rng] = [0, 0, 0] # Adding data", "AVG_HDR, self.species_list = self.get_HDR_Species() ## MAIN READ AND TREE (TIMBER CLASS) INITIALIZATION with", "range(1, len(data[1]) + 1): master[spp][i] += data[1][i - 1] totals_temp[i] += data[1][i -", "\"\" for ranges in self.LOG_RANGE_LIST: if Log_Dict[key]['L_LGT'] in ranges[1]: rng = ranges[0] temp_list", "# Adding data to Master Dict for data in self.summary_logs: spp = data[0]", "for i in range(1, len(data)): grade, rng = data[i][0], data[i][1] for j in", "master def get_conditions_dict(self): # ORDER OF INITAL SPP LIST - [0SPPCOUNT, 1TPA, 2BA_AC,", "SPECIES_LIST: SPECIES_LIST.append(SPP) if line[5] != \"\": DBH = float(line[4]) HEIGHT = float(line[5]) HDR_LIST.append(HEIGHT", "len(master[key])): if i in sums: master[key][i] = master[key][i] / self.plots else: master[key][i] =", "import math ##### REPORT DATA MODULE class Report(object): LOG_RANGE_LIST = [[\"40+ ft\", range(41,", "LIST - [0TPA, 1BA_AC, 2QMD, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] master =", "8CF_AC] # After Pop SPPCOUNT and Add QMD to 2 index # ORDER", "from timberscale import Timber import csv import math ##### REPORT DATA MODULE class", "1 totals_temp[0] += 1 for i in range(1, len(data[1]) + 1): master[spp][i] +=", "main keys for spp in self.species_list: master[spp] = {} master['TOTALS'] = {} #", "1 for i in range(1, len(data[1]) + 1): master[spp][i] += data[1][i - 1]", "float(line[6]) if DBH >= 6.0: tree = Timber(SPECIES, DBH, HEIGHT) merch_dib = tree.merch_dib()", "0] # Adding data to Master Dict for data in self.summary_logs: spp =", "= {} self.report() def report(self): AVG_HDR, self.species_list = self.get_HDR_Species() ## MAIN READ AND", "self.summary_conditions: 
spp = data[0] master[spp][0] += 1 totals_temp[0] += 1 for i in", "count = 0 for rng in master[key][grade]: count += master[key][grade][rng][0] if count ==", "math.sqrt((master[key][1] / master[key][0]) / .005454)) return master def get_logs_dict(self): log_rng = [\"40+ ft\",", "LIST - [0SPPCOUNT, 1TPA, 2BA_AC, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] # After", "and Totals for key in master: for grade in Timber.GRADE_NAMES: master[key][grade] = {}", ">= 6.0: tree = Timber(SPECIES, DBH, HEIGHT) merch_dib = tree.merch_dib() if merch_dib <", "Plots, Pref_Log, Min_Log): self.csv = CSV self.stand = Stand_to_Examine.upper() self.plots = Plots self.plog", "self.plots else: master[key][i] = master[key][i] / master[key][0] master[key].pop(0) master[key].insert(2, math.sqrt((master[key][1] / master[key][0]) /", "Totals for key in master: for grade in Timber.GRADE_NAMES: master[key][grade] = {} for", "{} self.report() def report(self): AVG_HDR, self.species_list = self.get_HDR_Species() ## MAIN READ AND TREE", "else: SPECIES = str(line[3]).upper() DBH = float(line[4]) if line[5] == \"\": HEIGHT =", "= [] for key in master: for grade in master[key]: count = 0", "that have zero data ax_list = [] for key in master: for grade", "[] self.conditions_dict = {} self.logs_dict = {} self.report() def report(self): AVG_HDR, self.species_list =", "master[spp][0] += 1 totals_temp[0] += 1 for i in range(1, len(data[1]) + 1):", "AVG_HDR, SPECIES_LIST def get_log_list(self, Species, Log_Dict): master = [Species] for key in Log_Dict:", "self.plog, self.mlog, PLOT_FACTOR) self.summary_conditions.append([single['SPP'][0], [tree_per_acre['TPA'], tree_per_acre['BA_AC'], tree_per_acre['RD_AC'], single['T_HGT'], single['HDR'], single['VBAR'], tree_per_acre['BF_AC'], tree_per_acre['CF_AC']]]) self.summary_logs.append(self.get_log_list(single['SPP'][0],", "QMD to 2 index # ORDER OF FINAL SPP LIST - [0TPA, 1BA_AC,", "- 2] += (data[i][j] / self.plots) master[spp]['TTL'][rng][j - 2] += (data[i][j] / 
self.plots)", "[] self.summary_logs = [] self.conditions_dict = {} self.logs_dict = {} self.report() def report(self):", "self.species_list = self.get_HDR_Species() ## MAIN READ AND TREE (TIMBER CLASS) INITIALIZATION with open(self.csv,", "0, 0, 0, 0, 0] for spp in self.species_list: master[spp] = [0, 0,", "tree_data_reader: if line[0] == \"\": break elif str(line[0]).upper() != self.stand: next else: SPP", "master[spp][grade]['TGRD'][j - 2] += (data[i][j] / self.plots) master[spp]['TTL'][rng][j - 2] += (data[i][j] /", "ft\", range(11, 21)], [\"1-10 ft\", range(1, 11)]] def __init__(self, CSV, Stand_to_Examine, Plots, Pref_Log,", "in master: for grade in master[key]: count = 0 for rng in master[key][grade]:", "master['TOTALS']['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots) # Removing any Grades that have", "= {} for rng in log_rng: master[key][grade][rng] = [0, 0, 0] master[key]['TTL'] =", "[\"11-20 ft\", range(11, 21)], [\"1-10 ft\", range(1, 11)]] def __init__(self, CSV, Stand_to_Examine, Plots,", "= [] self.summary_conditions = [] self.summary_logs = [] self.conditions_dict = {} self.logs_dict =", "/ self.plots) master[spp]['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots) master['TOTALS'][grade][rng][j - 2] +=", "keys for spp in self.species_list: master[spp] = {} master['TOTALS'] = {} # Formatting", "tree_per_acre['BF_AC'], tree_per_acre['CF_AC']]]) self.summary_logs.append(self.get_log_list(single['SPP'][0], log_per_acre)) else: tree = Timber(SPECIES, DBH, HEIGHT) self.summary_conditions.append([tree.SPP, [tree.get_TPA(PLOT_FACTOR), tree.get_BA_acre(PLOT_FACTOR),", "= [0, 0, 0, 0, 0, 0, 0, 0, 0] for data in", "tree_data: tree_data_reader = csv.reader(tree_data) next(tree_data_reader) for line in tree_data_reader: if line[0] == \"\":", "with open(self.csv, 'r') as tree_data: tree_data_reader = csv.reader(tree_data) next(tree_data_reader) for line in tree_data_reader:", "HDR_LIST.append(HEIGHT / (DBH / 12)) AVG_HDR = round(sum(HDR_LIST) / len(HDR_LIST), 2) 
return AVG_HDR,", "for i in range(1, len(master[key])): if i in sums: master[key][i] = master[key][i] /", "into main keys for spp in self.species_list: master[spp] = {} master['TOTALS'] = {}", "MAIN READ AND TREE (TIMBER CLASS) INITIALIZATION with open(self.csv, 'r') as tree_data: tree_data_reader", "if line[5] != \"\": DBH = float(line[4]) HEIGHT = float(line[5]) HDR_LIST.append(HEIGHT / (DBH", "def get_logs_dict(self): log_rng = [\"40+ ft\", \"31-40 ft\", \"21-30 ft\", \"11-20 ft\", \"1-10", "and Add QMD to 2 index # ORDER OF FINAL SPP LIST -", "if merch_dib < 5: merch_dib = 5 single = tree.tree_single(merch_dib, self.plog, self.mlog) tree_per_acre", "##### REPORT DATA MODULE class Report(object): LOG_RANGE_LIST = [[\"40+ ft\", range(41, 121)], [\"31-40", "DBH, HEIGHT) self.summary_conditions.append([tree.SPP, [tree.get_TPA(PLOT_FACTOR), tree.get_BA_acre(PLOT_FACTOR), tree.get_RD_acre(PLOT_FACTOR), tree.HGT, tree.HDR, 0, 0, 0]]) ## SUMMARY", "in correct order, as nested dicts of Species and Totals for key in", "import csv import math ##### REPORT DATA MODULE class Report(object): LOG_RANGE_LIST = [[\"40+", "in range(2, len(data[i])): master[spp][grade][rng][j - 2] += (data[i][j] / self.plots) master[spp][grade]['TGRD'][j - 2]", "## SUMMARY STATISTICS self.conditions_dict = self.get_conditions_dict() self.logs_dict = self.get_logs_dict() return def get_HDR_Species(self): HDR_LIST", "get_conditions_dict(self): # ORDER OF INITAL SPP LIST - [0SPPCOUNT, 1TPA, 2BA_AC, 3RD_AC, 4T_HGT,", "return master def get_conditions_dict(self): # ORDER OF INITAL SPP LIST - [0SPPCOUNT, 1TPA,", "0, 0, 0, 0, 0, 0] for spp in self.species_list: master[spp] = [0,", "master[key].insert(2, math.sqrt((master[key][1] / master[key][0]) / .005454)) return master def get_logs_dict(self): log_rng = [\"40+", "[\"31-40 ft\", range(31, 41)], [\"21-30 ft\", range(21, 31)], [\"11-20 ft\", range(11, 21)], [\"1-10", "= ranges[0] temp_list = [Log_Dict[key]['L_GRD'][0], rng, Log_Dict[key]['L_CT_AC'], 
Log_Dict[key]['L_BF_AC'], Log_Dict[key]['L_CF_AC']] master.append(temp_list) return master def", "SPPCOUNT and Add QMD to 2 index # ORDER OF FINAL SPP LIST", "master[spp][grade][rng][j - 2] += (data[i][j] / self.plots) master[spp][grade]['TGRD'][j - 2] += (data[i][j] /", "- 2] += (data[i][j] / self.plots) master['TOTALS']['TTL'][rng][j - 2] += (data[i][j] / self.plots)", "master: for grade in master[key]: count = 0 for rng in master[key][grade]: count", "self.plog, self.mlog) tree_per_acre = tree.tree_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) log_per_acre = tree.log_acre(merch_dib, self.plog, self.mlog,", "temp_list = [Log_Dict[key]['L_GRD'][0], rng, Log_Dict[key]['L_CT_AC'], Log_Dict[key]['L_BF_AC'], Log_Dict[key]['L_CF_AC']] master.append(temp_list) return master def get_conditions_dict(self): #", "[0, 0, 0] master[key]['TTL'] = {} for rng in log_rng: master[key]['TTL'][rng] = [0,", "in self.summary_conditions: spp = data[0] master[spp][0] += 1 totals_temp[0] += 1 for i", "ft\", range(21, 31)], [\"11-20 ft\", range(11, 21)], [\"1-10 ft\", range(1, 11)]] def __init__(self,", "= totals_temp for key in master: sums = [1, 2, 3, 7, 8]", "/ 12)) AVG_HDR = round(sum(HDR_LIST) / len(HDR_LIST), 2) return AVG_HDR, SPECIES_LIST def get_log_list(self,", "len(HDR_LIST), 2) return AVG_HDR, SPECIES_LIST def get_log_list(self, Species, Log_Dict): master = [Species] for", "!= self.stand: next else: SPP = str(line[3]).upper() if SPP not in SPECIES_LIST: SPECIES_LIST.append(SPP)", "timberscale import Timber import csv import math ##### REPORT DATA MODULE class Report(object):", "in tree_data_reader: if line[0] == \"\": break elif str(line[0]).upper() != self.stand: next else:", "Formatting Grades and Ranges in correct order, as nested dicts of Species and", "Timber(SPECIES, DBH, HEIGHT) self.summary_conditions.append([tree.SPP, [tree.get_TPA(PLOT_FACTOR), tree.get_BA_acre(PLOT_FACTOR), tree.get_RD_acre(PLOT_FACTOR), tree.HGT, tree.HDR, 0, 0, 0]]) ##", "Species into main 
keys for spp in self.species_list: master[spp] = {} master['TOTALS'] =", "data[0] master[spp][0] += 1 totals_temp[0] += 1 for i in range(1, len(data[1]) +", "'r') as tree_data: tree_data_reader = csv.reader(tree_data) next(tree_data_reader) for line in tree_data_reader: if line[0]", "def get_conditions_dict(self): # ORDER OF INITAL SPP LIST - [0SPPCOUNT, 1TPA, 2BA_AC, 3RD_AC,", "= tree.tree_single(merch_dib, self.plog, self.mlog) tree_per_acre = tree.tree_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) log_per_acre = tree.log_acre(merch_dib,", "/ master[key][0]) / .005454)) return master def get_logs_dict(self): log_rng = [\"40+ ft\", \"31-40", "zero data ax_list = [] for key in master: for grade in master[key]:", "Add QMD to 2 index # ORDER OF FINAL SPP LIST - [0TPA,", "0, 0, 0] for spp in self.species_list: master[spp] = [0, 0, 0, 0,", "open(self.csv, 'r') as tree_data: tree_data_reader = csv.reader(tree_data) next(tree_data_reader) for line in tree_data_reader: if", "str(line[3]).upper() if SPP not in SPECIES_LIST: SPECIES_LIST.append(SPP) if line[5] != \"\": DBH =", "totals_temp[0] += 1 for i in range(1, len(data[1]) + 1): master[spp][i] += data[1][i", "Master Dict for data in self.summary_logs: spp = data[0] for i in range(1,", "ft\", \"31-40 ft\", \"21-30 ft\", \"11-20 ft\", \"1-10 ft\", 'TGRD'] master = {}", "in Timber.GRADE_NAMES: master[key][grade] = {} for rng in log_rng: master[key][grade][rng] = [0, 0,", "totals_temp for key in master: sums = [1, 2, 3, 7, 8] for", "int(round(AVG_HDR * (DBH/12),0)) else: HEIGHT = int(float(line[5])) PLOT_FACTOR = float(line[6]) if DBH >=", "for i in range(1, len(data[1]) + 1): master[spp][i] += data[1][i - 1] totals_temp[i]", "master: for grade in Timber.GRADE_NAMES: master[key][grade] = {} for rng in log_rng: master[key][grade][rng]", "4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] # After Pop SPPCOUNT and Add QMD to", "2] += (data[i][j] / self.plots) master[spp]['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots) 
master['TOTALS'][grade][rng][j", "len(data)): grade, rng = data[i][0], data[i][1] for j in range(2, len(data[i])): master[spp][grade][rng][j -", "= float(line[5]) HDR_LIST.append(HEIGHT / (DBH / 12)) AVG_HDR = round(sum(HDR_LIST) / len(HDR_LIST), 2)", "in log_rng: master[key]['TTL'][rng] = [0, 0, 0] # Adding data to Master Dict", "int(float(line[5])) PLOT_FACTOR = float(line[6]) if DBH >= 6.0: tree = Timber(SPECIES, DBH, HEIGHT)", "for key in master: sums = [1, 2, 3, 7, 8] for i", "HEIGHT = float(line[5]) HDR_LIST.append(HEIGHT / (DBH / 12)) AVG_HDR = round(sum(HDR_LIST) / len(HDR_LIST),", "[[\"40+ ft\", range(41, 121)], [\"31-40 ft\", range(31, 41)], [\"21-30 ft\", range(21, 31)], [\"11-20", "AND TREE (TIMBER CLASS) INITIALIZATION with open(self.csv, 'r') as tree_data: tree_data_reader = csv.reader(tree_data)", "self.plog = Pref_Log self.mlog = Min_Log self.species_list = [] self.summary_conditions = [] self.summary_logs", "if line[5] == \"\": HEIGHT = int(round(AVG_HDR * (DBH/12),0)) else: HEIGHT = int(float(line[5]))", "Adding data to Master Dict for data in self.summary_logs: spp = data[0] for", "sums: master[key][i] = master[key][i] / self.plots else: master[key][i] = master[key][i] / master[key][0] master[key].pop(0)", "spp = data[0] master[spp][0] += 1 totals_temp[0] += 1 for i in range(1,", "= float(line[4]) HEIGHT = float(line[5]) HDR_LIST.append(HEIGHT / (DBH / 12)) AVG_HDR = round(sum(HDR_LIST)", "data ax_list = [] for key in master: for grade in master[key]: count", "SUMMARY STATISTICS self.conditions_dict = self.get_conditions_dict() self.logs_dict = self.get_logs_dict() return def get_HDR_Species(self): HDR_LIST =", "== \"\": break elif str(line[0]).upper() != self.stand: next else: SPP = str(line[3]).upper() if", "+= data[1][i - 1] master[\"TOTALS\"] = totals_temp for key in master: sums =", "= Plots self.plog = Pref_Log self.mlog = Min_Log self.species_list = [] self.summary_conditions =", "len(data[i])): master[spp][grade][rng][j - 2] += 
(data[i][j] / self.plots) master[spp][grade]['TGRD'][j - 2] += (data[i][j]", "[1, 2, 3, 7, 8] for i in range(1, len(master[key])): if i in", "Log_Dict): master = [Species] for key in Log_Dict: rng = \"\" for ranges", "Log_Dict[key]['L_BF_AC'], Log_Dict[key]['L_CF_AC']] master.append(temp_list) return master def get_conditions_dict(self): # ORDER OF INITAL SPP LIST", "SPP LIST - [0TPA, 1BA_AC, 2QMD, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] master", "2] += (data[i][j] / self.plots) master[spp]['TTL'][rng][j - 2] += (data[i][j] / self.plots) master[spp]['TTL']['TGRD'][j", "master[key][i] = master[key][i] / self.plots else: master[key][i] = master[key][i] / master[key][0] master[key].pop(0) master[key].insert(2,", "line[0] == \"\": break elif str(line[0]).upper() != self.stand: next else: SPP = str(line[3]).upper()", "Ranges in correct order, as nested dicts of Species and Totals for key", "== \"\": break elif str(line[0]).upper() != self.stand: next else: SPECIES = str(line[3]).upper() DBH", "of Species and Totals for key in master: for grade in Timber.GRADE_NAMES: master[key][grade]", "log_per_acre)) else: tree = Timber(SPECIES, DBH, HEIGHT) self.summary_conditions.append([tree.SPP, [tree.get_TPA(PLOT_FACTOR), tree.get_BA_acre(PLOT_FACTOR), tree.get_RD_acre(PLOT_FACTOR), tree.HGT, tree.HDR,", "__init__(self, CSV, Stand_to_Examine, Plots, Pref_Log, Min_Log): self.csv = CSV self.stand = Stand_to_Examine.upper() self.plots", "self.plots = Plots self.plog = Pref_Log self.mlog = Min_Log self.species_list = [] self.summary_conditions", "Log_Dict[key]['L_CF_AC']] master.append(temp_list) return master def get_conditions_dict(self): # ORDER OF INITAL SPP LIST -", "[0, 0, 0] # Adding data to Master Dict for data in self.summary_logs:", "# Formatting Species into main keys for spp in self.species_list: master[spp] = {}", "round(sum(HDR_LIST) / len(HDR_LIST), 2) return AVG_HDR, SPECIES_LIST def get_log_list(self, Species, Log_Dict): master =", "rng = data[i][0], data[i][1] 
for j in range(2, len(data[i])): master[spp][grade][rng][j - 2] +=", "Dict for data in self.summary_logs: spp = data[0] for i in range(1, len(data)):", "- [0TPA, 1BA_AC, 2QMD, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] master = {}", "master[key][i] / master[key][0] master[key].pop(0) master[key].insert(2, math.sqrt((master[key][1] / master[key][0]) / .005454)) return master def", "order, as nested dicts of Species and Totals for key in master: for", "+= 1 for i in range(1, len(data[1]) + 1): master[spp][i] += data[1][i -", "i in range(1, len(data)): grade, rng = data[i][0], data[i][1] for j in range(2,", "master = {} totals_temp = [0, 0, 0, 0, 0, 0, 0, 0,", "def report(self): AVG_HDR, self.species_list = self.get_HDR_Species() ## MAIN READ AND TREE (TIMBER CLASS)", "FINAL SPP LIST - [0TPA, 1BA_AC, 2QMD, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC]", "merch_dib = 5 single = tree.tree_single(merch_dib, self.plog, self.mlog) tree_per_acre = tree.tree_acre(merch_dib, self.plog, self.mlog,", "/ len(HDR_LIST), 2) return AVG_HDR, SPECIES_LIST def get_log_list(self, Species, Log_Dict): master = [Species]", "data[1][i - 1] totals_temp[i] += data[1][i - 1] master[\"TOTALS\"] = totals_temp for key", "nested dicts of Species and Totals for key in master: for grade in", "elif str(line[0]).upper() != self.stand: next else: SPECIES = str(line[3]).upper() DBH = float(line[4]) if", "!= \"\": DBH = float(line[4]) HEIGHT = float(line[5]) HDR_LIST.append(HEIGHT / (DBH / 12))", "self.report() def report(self): AVG_HDR, self.species_list = self.get_HDR_Species() ## MAIN READ AND TREE (TIMBER", "[Log_Dict[key]['L_GRD'][0], rng, Log_Dict[key]['L_CT_AC'], Log_Dict[key]['L_BF_AC'], Log_Dict[key]['L_CF_AC']] master.append(temp_list) return master def get_conditions_dict(self): # ORDER OF", "/ self.plots) master['TOTALS']['TTL'][rng][j - 2] += (data[i][j] / self.plots) master['TOTALS']['TTL']['TGRD'][j - 2] +=", "for grade in Timber.GRADE_NAMES: master[key][grade] = {} for rng in log_rng: 
master[key][grade][rng] =", "spp = data[0] for i in range(1, len(data)): grade, rng = data[i][0], data[i][1]", "next else: SPP = str(line[3]).upper() if SPP not in SPECIES_LIST: SPECIES_LIST.append(SPP) if line[5]", "\"21-30 ft\", \"11-20 ft\", \"1-10 ft\", 'TGRD'] master = {} # Formatting Species", "41)], [\"21-30 ft\", range(21, 31)], [\"11-20 ft\", range(11, 21)], [\"1-10 ft\", range(1, 11)]]", "DATA MODULE class Report(object): LOG_RANGE_LIST = [[\"40+ ft\", range(41, 121)], [\"31-40 ft\", range(31,", "+= (data[i][j] / self.plots) master[spp]['TTL'][rng][j - 2] += (data[i][j] / self.plots) master[spp]['TTL']['TGRD'][j -", "Pref_Log, Min_Log): self.csv = CSV self.stand = Stand_to_Examine.upper() self.plots = Plots self.plog =", "else: SPP = str(line[3]).upper() if SPP not in SPECIES_LIST: SPECIES_LIST.append(SPP) if line[5] !=", "single['HDR'], single['VBAR'], tree_per_acre['BF_AC'], tree_per_acre['CF_AC']]]) self.summary_logs.append(self.get_log_list(single['SPP'][0], log_per_acre)) else: tree = Timber(SPECIES, DBH, HEIGHT) self.summary_conditions.append([tree.SPP,", "math ##### REPORT DATA MODULE class Report(object): LOG_RANGE_LIST = [[\"40+ ft\", range(41, 121)],", "in master[key]: count = 0 for rng in master[key][grade]: count += master[key][grade][rng][0] if", "= [0, 0, 0, 0, 0, 0, 0, 0, 0] for spp in", "[] for key in master: for grade in master[key]: count = 0 for", "+= (data[i][j] / self.plots) master['TOTALS']['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots) # Removing", "tree.get_RD_acre(PLOT_FACTOR), tree.HGT, tree.HDR, 0, 0, 0]]) ## SUMMARY STATISTICS self.conditions_dict = self.get_conditions_dict() self.logs_dict", "INITAL SPP LIST - [0SPPCOUNT, 1TPA, 2BA_AC, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC]", "data in self.summary_conditions: spp = data[0] master[spp][0] += 1 totals_temp[0] += 1 for", "ft\", range(1, 11)]] def __init__(self, CSV, Stand_to_Examine, Plots, Pref_Log, Min_Log): self.csv = CSV", "31)], [\"11-20 ft\", range(11, 21)], 
[\"1-10 ft\", range(1, 11)]] def __init__(self, CSV, Stand_to_Examine,", "/ (DBH / 12)) AVG_HDR = round(sum(HDR_LIST) / len(HDR_LIST), 2) return AVG_HDR, SPECIES_LIST", "self.mlog = Min_Log self.species_list = [] self.summary_conditions = [] self.summary_logs = [] self.conditions_dict", "line[5] == \"\": HEIGHT = int(round(AVG_HDR * (DBH/12),0)) else: HEIGHT = int(float(line[5])) PLOT_FACTOR", "self.plots) master[spp]['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots) master['TOTALS'][grade][rng][j - 2] += (data[i][j]", "range(41, 121)], [\"31-40 ft\", range(31, 41)], [\"21-30 ft\", range(21, 31)], [\"11-20 ft\", range(11,", "0] master[key]['TTL'] = {} for rng in log_rng: master[key]['TTL'][rng] = [0, 0, 0]", "data[i][1] for j in range(2, len(data[i])): master[spp][grade][rng][j - 2] += (data[i][j] / self.plots)", "in range(1, len(data[1]) + 1): master[spp][i] += data[1][i - 1] totals_temp[i] += data[1][i", "if Log_Dict[key]['L_LGT'] in ranges[1]: rng = ranges[0] temp_list = [Log_Dict[key]['L_GRD'][0], rng, Log_Dict[key]['L_CT_AC'], Log_Dict[key]['L_BF_AC'],", "key in master: sums = [1, 2, 3, 7, 8] for i in", "(data[i][j] / self.plots) master['TOTALS'][grade][rng][j - 2] += (data[i][j] / self.plots) master['TOTALS'][grade]['TGRD'][j - 2]", "Pref_Log self.mlog = Min_Log self.species_list = [] self.summary_conditions = [] self.summary_logs = []", "/ self.plots) # Removing any Grades that have zero data ax_list = []", "def get_log_list(self, Species, Log_Dict): master = [Species] for key in Log_Dict: rng =", "Grades and Ranges in correct order, as nested dicts of Species and Totals", "# After Pop SPPCOUNT and Add QMD to 2 index # ORDER OF", "/ self.plots else: master[key][i] = master[key][i] / master[key][0] master[key].pop(0) master[key].insert(2, math.sqrt((master[key][1] / master[key][0])", "Timber import csv import math ##### REPORT DATA MODULE class Report(object): LOG_RANGE_LIST =", "= Timber(SPECIES, DBH, HEIGHT) self.summary_conditions.append([tree.SPP, 
[tree.get_TPA(PLOT_FACTOR), tree.get_BA_acre(PLOT_FACTOR), tree.get_RD_acre(PLOT_FACTOR), tree.HGT, tree.HDR, 0, 0, 0]])", "0]]) ## SUMMARY STATISTICS self.conditions_dict = self.get_conditions_dict() self.logs_dict = self.get_logs_dict() return def get_HDR_Species(self):", "\"31-40 ft\", \"21-30 ft\", \"11-20 ft\", \"1-10 ft\", 'TGRD'] master = {} #", "self.logs_dict = {} self.report() def report(self): AVG_HDR, self.species_list = self.get_HDR_Species() ## MAIN READ", "self.species_list = [] self.summary_conditions = [] self.summary_logs = [] self.conditions_dict = {} self.logs_dict", "3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] master = {} totals_temp = [0, 0,", "0, 0, 0] for data in self.summary_conditions: spp = data[0] master[spp][0] += 1", "range(1, len(master[key])): if i in sums: master[key][i] = master[key][i] / self.plots else: master[key][i]", "self.get_HDR_Species() ## MAIN READ AND TREE (TIMBER CLASS) INITIALIZATION with open(self.csv, 'r') as", "1BA_AC, 2QMD, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] master = {} totals_temp =", "= str(line[3]).upper() if SPP not in SPECIES_LIST: SPECIES_LIST.append(SPP) if line[5] != \"\": DBH", "OF FINAL SPP LIST - [0TPA, 1BA_AC, 2QMD, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC,", "7BF_AC, 8CF_AC] master = {} totals_temp = [0, 0, 0, 0, 0, 0,", "# Formatting Grades and Ranges in correct order, as nested dicts of Species", "- 2] += (data[i][j] / self.plots) # Removing any Grades that have zero", "= [1, 2, 3, 7, 8] for i in range(1, len(master[key])): if i", "< 5: merch_dib = 5 single = tree.tree_single(merch_dib, self.plog, self.mlog) tree_per_acre = tree.tree_acre(merch_dib,", "READ AND TREE (TIMBER CLASS) INITIALIZATION with open(self.csv, 'r') as tree_data: tree_data_reader =", "master def get_logs_dict(self): log_rng = [\"40+ ft\", \"31-40 ft\", \"21-30 ft\", \"11-20 ft\",", "+= (data[i][j] / self.plots) master['TOTALS']['TTL'][rng][j - 2] += (data[i][j] / self.plots) master['TOTALS']['TTL']['TGRD'][j -", "[] with 
open(self.csv, 'r') as tree_data: tree_data_reader = csv.reader(tree_data) next(tree_data_reader) for line in", "correct order, as nested dicts of Species and Totals for key in master:", "ft\", range(41, 121)], [\"31-40 ft\", range(31, 41)], [\"21-30 ft\", range(21, 31)], [\"11-20 ft\",", "tree_data_reader: if line[0] == \"\": break elif str(line[0]).upper() != self.stand: next else: SPECIES", "CLASS) INITIALIZATION with open(self.csv, 'r') as tree_data: tree_data_reader = csv.reader(tree_data) next(tree_data_reader) for line", "{} master['TOTALS'] = {} # Formatting Grades and Ranges in correct order, as", "not in SPECIES_LIST: SPECIES_LIST.append(SPP) if line[5] != \"\": DBH = float(line[4]) HEIGHT =", "# ORDER OF INITAL SPP LIST - [0SPPCOUNT, 1TPA, 2BA_AC, 3RD_AC, 4T_HGT, 5HDR,", "master = {} # Formatting Species into main keys for spp in self.species_list:", "data[i][0], data[i][1] for j in range(2, len(data[i])): master[spp][grade][rng][j - 2] += (data[i][j] /", "Min_Log self.species_list = [] self.summary_conditions = [] self.summary_logs = [] self.conditions_dict = {}", "for ranges in self.LOG_RANGE_LIST: if Log_Dict[key]['L_LGT'] in ranges[1]: rng = ranges[0] temp_list =", "i in range(1, len(master[key])): if i in sums: master[key][i] = master[key][i] / self.plots", "2] += (data[i][j] / self.plots) master['TOTALS']['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots) #", "[0TPA, 1BA_AC, 2QMD, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] master = {} totals_temp", "{} self.logs_dict = {} self.report() def report(self): AVG_HDR, self.species_list = self.get_HDR_Species() ## MAIN", "Removing any Grades that have zero data ax_list = [] for key in", "CSV, Stand_to_Examine, Plots, Pref_Log, Min_Log): self.csv = CSV self.stand = Stand_to_Examine.upper() self.plots =", "self.summary_logs = [] self.conditions_dict = {} self.logs_dict = {} self.report() def report(self): AVG_HDR,", "self.plots) master['TOTALS'][grade]['TGRD'][j - 2] += (data[i][j] / self.plots) 
master['TOTALS']['TTL'][rng][j - 2] += (data[i][j]", "single['VBAR'], tree_per_acre['BF_AC'], tree_per_acre['CF_AC']]]) self.summary_logs.append(self.get_log_list(single['SPP'][0], log_per_acre)) else: tree = Timber(SPECIES, DBH, HEIGHT) self.summary_conditions.append([tree.SPP, [tree.get_TPA(PLOT_FACTOR),", "self.stand: next else: SPP = str(line[3]).upper() if SPP not in SPECIES_LIST: SPECIES_LIST.append(SPP) if", "line[5] != \"\": DBH = float(line[4]) HEIGHT = float(line[5]) HDR_LIST.append(HEIGHT / (DBH /", "+= (data[i][j] / self.plots) # Removing any Grades that have zero data ax_list", "master = [Species] for key in Log_Dict: rng = \"\" for ranges in", "tree_per_acre['RD_AC'], single['T_HGT'], single['HDR'], single['VBAR'], tree_per_acre['BF_AC'], tree_per_acre['CF_AC']]]) self.summary_logs.append(self.get_log_list(single['SPP'][0], log_per_acre)) else: tree = Timber(SPECIES, DBH,", "0, 0, 0, 0, 0, 0, 0, 0] for spp in self.species_list: master[spp]", "0, 0] for spp in self.species_list: master[spp] = [0, 0, 0, 0, 0,", "tree.HDR, 0, 0, 0]]) ## SUMMARY STATISTICS self.conditions_dict = self.get_conditions_dict() self.logs_dict = self.get_logs_dict()", "if i in sums: master[key][i] = master[key][i] / self.plots else: master[key][i] = master[key][i]", "master[spp] = {} master['TOTALS'] = {} # Formatting Grades and Ranges in correct", "2] += (data[i][j] / self.plots) master['TOTALS'][grade][rng][j - 2] += (data[i][j] / self.plots) master['TOTALS'][grade]['TGRD'][j", "str(line[3]).upper() DBH = float(line[4]) if line[5] == \"\": HEIGHT = int(round(AVG_HDR * (DBH/12),0))", "in Log_Dict: rng = \"\" for ranges in self.LOG_RANGE_LIST: if Log_Dict[key]['L_LGT'] in ranges[1]:", "5HDR, 6VBAR, 7BF_AC, 8CF_AC] master = {} totals_temp = [0, 0, 0, 0,", "self.logs_dict = self.get_logs_dict() return def get_HDR_Species(self): HDR_LIST = [] SPECIES_LIST = [] with", "\"\": HEIGHT = int(round(AVG_HDR * (DBH/12),0)) else: HEIGHT = int(float(line[5])) PLOT_FACTOR = float(line[6])", 
"Timber(SPECIES, DBH, HEIGHT) merch_dib = tree.merch_dib() if merch_dib < 5: merch_dib = 5", "[Species] for key in Log_Dict: rng = \"\" for ranges in self.LOG_RANGE_LIST: if", "ax_list = [] for key in master: for grade in master[key]: count =", "in master[key][grade]: count += master[key][grade][rng][0] if count == 0: ax_list.append((key, grade)) for ax", "i in range(1, len(data[1]) + 1): master[spp][i] += data[1][i - 1] totals_temp[i] +=", "= 5 single = tree.tree_single(merch_dib, self.plog, self.mlog) tree_per_acre = tree.tree_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR)", "= master[key][i] / self.plots else: master[key][i] = master[key][i] / master[key][0] master[key].pop(0) master[key].insert(2, math.sqrt((master[key][1]", "j in range(2, len(data[i])): master[spp][grade][rng][j - 2] += (data[i][j] / self.plots) master[spp][grade]['TGRD'][j -", "+= data[1][i - 1] totals_temp[i] += data[1][i - 1] master[\"TOTALS\"] = totals_temp for", "master[key].pop(0) master[key].insert(2, math.sqrt((master[key][1] / master[key][0]) / .005454)) return master def get_logs_dict(self): log_rng =", "tree_per_acre['BA_AC'], tree_per_acre['RD_AC'], single['T_HGT'], single['HDR'], single['VBAR'], tree_per_acre['BF_AC'], tree_per_acre['CF_AC']]]) self.summary_logs.append(self.get_log_list(single['SPP'][0], log_per_acre)) else: tree = Timber(SPECIES,", "STATISTICS self.conditions_dict = self.get_conditions_dict() self.logs_dict = self.get_logs_dict() return def get_HDR_Species(self): HDR_LIST = []", "tree_data_reader = csv.reader(tree_data) next(tree_data_reader) for line in tree_data_reader: if line[0] == \"\": break", "= float(line[6]) if DBH >= 6.0: tree = Timber(SPECIES, DBH, HEIGHT) merch_dib =", "0, 0] for data in self.summary_conditions: spp = data[0] master[spp][0] += 1 totals_temp[0]", "report(self): AVG_HDR, self.species_list = self.get_HDR_Species() ## MAIN READ AND TREE (TIMBER CLASS) INITIALIZATION", "master[\"TOTALS\"] = totals_temp for key in master: sums = [1, 2, 
3, 7,", "for rng in log_rng: master[key][grade][rng] = [0, 0, 0] master[key]['TTL'] = {} for", "Report(object): LOG_RANGE_LIST = [[\"40+ ft\", range(41, 121)], [\"31-40 ft\", range(31, 41)], [\"21-30 ft\",", "MODULE class Report(object): LOG_RANGE_LIST = [[\"40+ ft\", range(41, 121)], [\"31-40 ft\", range(31, 41)],", "1TPA, 2BA_AC, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] # After Pop SPPCOUNT and", "self.plog, self.mlog, PLOT_FACTOR) log_per_acre = tree.log_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) self.summary_conditions.append([single['SPP'][0], [tree_per_acre['TPA'], tree_per_acre['BA_AC'], tree_per_acre['RD_AC'],", "tree.tree_single(merch_dib, self.plog, self.mlog) tree_per_acre = tree.tree_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) log_per_acre = tree.log_acre(merch_dib, self.plog,", "(DBH/12),0)) else: HEIGHT = int(float(line[5])) PLOT_FACTOR = float(line[6]) if DBH >= 6.0: tree", "[\"40+ ft\", \"31-40 ft\", \"21-30 ft\", \"11-20 ft\", \"1-10 ft\", 'TGRD'] master =", "self.conditions_dict = self.get_conditions_dict() self.logs_dict = self.get_logs_dict() return def get_HDR_Species(self): HDR_LIST = [] SPECIES_LIST", "/ self.plots) master[spp][grade]['TGRD'][j - 2] += (data[i][j] / self.plots) master[spp]['TTL'][rng][j - 2] +=", "0, 0] master[key]['TTL'] = {} for rng in log_rng: master[key]['TTL'][rng] = [0, 0,", "= Stand_to_Examine.upper() self.plots = Plots self.plog = Pref_Log self.mlog = Min_Log self.species_list =", "for rng in master[key][grade]: count += master[key][grade][rng][0] if count == 0: ax_list.append((key, grade))", "PLOT_FACTOR) self.summary_conditions.append([single['SPP'][0], [tree_per_acre['TPA'], tree_per_acre['BA_AC'], tree_per_acre['RD_AC'], single['T_HGT'], single['HDR'], single['VBAR'], tree_per_acre['BF_AC'], tree_per_acre['CF_AC']]]) self.summary_logs.append(self.get_log_list(single['SPP'][0], log_per_acre)) else:", "0, 0, 0, 0, 0, 0, 0] for spp in self.species_list: master[spp] =", "2] += (data[i][j] / 
self.plots) # Removing any Grades that have zero data", "if DBH >= 6.0: tree = Timber(SPECIES, DBH, HEIGHT) merch_dib = tree.merch_dib() if", "= [] with open(self.csv, 'r') as tree_data: tree_data_reader = csv.reader(tree_data) next(tree_data_reader) for line", "for data in self.summary_conditions: spp = data[0] master[spp][0] += 1 totals_temp[0] += 1", "master[key][grade][rng] = [0, 0, 0] master[key]['TTL'] = {} for rng in log_rng: master[key]['TTL'][rng]", "totals_temp[i] += data[1][i - 1] master[\"TOTALS\"] = totals_temp for key in master: sums", "(data[i][j] / self.plots) # Removing any Grades that have zero data ax_list =", "(data[i][j] / self.plots) master['TOTALS']['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots) # Removing any", "SPP LIST - [0SPPCOUNT, 1TPA, 2BA_AC, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] #", "[0, 0, 0, 0, 0, 0, 0, 0, 0] for data in self.summary_conditions:", "{} # Formatting Grades and Ranges in correct order, as nested dicts of", "Formatting Species into main keys for spp in self.species_list: master[spp] = {} master['TOTALS']", "1] master[\"TOTALS\"] = totals_temp for key in master: sums = [1, 2, 3,", "= round(sum(HDR_LIST) / len(HDR_LIST), 2) return AVG_HDR, SPECIES_LIST def get_log_list(self, Species, Log_Dict): master", "= str(line[3]).upper() DBH = float(line[4]) if line[5] == \"\": HEIGHT = int(round(AVG_HDR *", "log_rng: master[key][grade][rng] = [0, 0, 0] master[key]['TTL'] = {} for rng in log_rng:", "for key in master: for grade in master[key]: count = 0 for rng", "= self.get_logs_dict() return def get_HDR_Species(self): HDR_LIST = [] SPECIES_LIST = [] with open(self.csv,", "self.mlog) tree_per_acre = tree.tree_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) log_per_acre = tree.log_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR)", "= [] SPECIES_LIST = [] with open(self.csv, 'r') as tree_data: tree_data_reader = csv.reader(tree_data)", "totals_temp = [0, 0, 0, 0, 0, 0, 0, 0, 0] for spp", "master[key][i] = 
master[key][i] / master[key][0] master[key].pop(0) master[key].insert(2, math.sqrt((master[key][1] / master[key][0]) / .005454)) return", "+= master[key][grade][rng][0] if count == 0: ax_list.append((key, grade)) for ax in ax_list: del", "11)]] def __init__(self, CSV, Stand_to_Examine, Plots, Pref_Log, Min_Log): self.csv = CSV self.stand =", "spp in self.species_list: master[spp] = [0, 0, 0, 0, 0, 0, 0, 0,", "(data[i][j] / self.plots) master[spp]['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots) master['TOTALS'][grade][rng][j - 2]", "2] += (data[i][j] / self.plots) master['TOTALS'][grade]['TGRD'][j - 2] += (data[i][j] / self.plots) master['TOTALS']['TTL'][rng][j", "= 0 for rng in master[key][grade]: count += master[key][grade][rng][0] if count == 0:", "self.conditions_dict = {} self.logs_dict = {} self.report() def report(self): AVG_HDR, self.species_list = self.get_HDR_Species()", "2) return AVG_HDR, SPECIES_LIST def get_log_list(self, Species, Log_Dict): master = [Species] for key", "rng = \"\" for ranges in self.LOG_RANGE_LIST: if Log_Dict[key]['L_LGT'] in ranges[1]: rng =", "/ self.plots) master['TOTALS'][grade]['TGRD'][j - 2] += (data[i][j] / self.plots) master['TOTALS']['TTL'][rng][j - 2] +=", "HEIGHT = int(round(AVG_HDR * (DBH/12),0)) else: HEIGHT = int(float(line[5])) PLOT_FACTOR = float(line[6]) if", "SPP not in SPECIES_LIST: SPECIES_LIST.append(SPP) if line[5] != \"\": DBH = float(line[4]) HEIGHT", "master[spp] = [0, 0, 0, 0, 0, 0, 0, 0, 0] for data", "range(11, 21)], [\"1-10 ft\", range(1, 11)]] def __init__(self, CSV, Stand_to_Examine, Plots, Pref_Log, Min_Log):", "self.species_list: master[spp] = {} master['TOTALS'] = {} # Formatting Grades and Ranges in", "PLOT_FACTOR) log_per_acre = tree.log_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) self.summary_conditions.append([single['SPP'][0], [tree_per_acre['TPA'], tree_per_acre['BA_AC'], tree_per_acre['RD_AC'], single['T_HGT'], single['HDR'],", "(data[i][j] / self.plots) 
master['TOTALS']['TTL'][rng][j - 2] += (data[i][j] / self.plots) master['TOTALS']['TTL']['TGRD'][j - 2]", "1] totals_temp[i] += data[1][i - 1] master[\"TOTALS\"] = totals_temp for key in master:", "HEIGHT) self.summary_conditions.append([tree.SPP, [tree.get_TPA(PLOT_FACTOR), tree.get_BA_acre(PLOT_FACTOR), tree.get_RD_acre(PLOT_FACTOR), tree.HGT, tree.HDR, 0, 0, 0]]) ## SUMMARY STATISTICS", "/ self.plots) master['TOTALS'][grade][rng][j - 2] += (data[i][j] / self.plots) master['TOTALS'][grade]['TGRD'][j - 2] +=", "0, 0, 0, 0, 0, 0, 0, 0] for data in self.summary_conditions: spp", "in range(1, len(data)): grade, rng = data[i][0], data[i][1] for j in range(2, len(data[i])):", "tree = Timber(SPECIES, DBH, HEIGHT) merch_dib = tree.merch_dib() if merch_dib < 5: merch_dib", "[\"21-30 ft\", range(21, 31)], [\"11-20 ft\", range(11, 21)], [\"1-10 ft\", range(1, 11)]] def", "## MAIN READ AND TREE (TIMBER CLASS) INITIALIZATION with open(self.csv, 'r') as tree_data:", "5: merch_dib = 5 single = tree.tree_single(merch_dib, self.plog, self.mlog) tree_per_acre = tree.tree_acre(merch_dib, self.plog,", "- 2] += (data[i][j] / self.plots) master[spp]['TTL']['TGRD'][j - 2] += (data[i][j] / self.plots)", "merch_dib = tree.merch_dib() if merch_dib < 5: merch_dib = 5 single = tree.tree_single(merch_dib,", "master[spp][i] += data[1][i - 1] totals_temp[i] += data[1][i - 1] master[\"TOTALS\"] = totals_temp", "data in self.summary_logs: spp = data[0] for i in range(1, len(data)): grade, rng", "= [Species] for key in Log_Dict: rng = \"\" for ranges in self.LOG_RANGE_LIST:", "self.plots) master[spp]['TTL'][rng][j - 2] += (data[i][j] / self.plots) master[spp]['TTL']['TGRD'][j - 2] += (data[i][j]", "= [] self.summary_logs = [] self.conditions_dict = {} self.logs_dict = {} self.report() def", "- 1] totals_temp[i] += data[1][i - 1] master[\"TOTALS\"] = totals_temp for key in", "for spp in self.species_list: master[spp] = {} master['TOTALS'] = {} # Formatting Grades", "(data[i][j] / self.plots) 
master['TOTALS'][grade]['TGRD'][j - 2] += (data[i][j] / self.plots) master['TOTALS']['TTL'][rng][j - 2]", "count += master[key][grade][rng][0] if count == 0: ax_list.append((key, grade)) for ax in ax_list:", "rng in log_rng: master[key][grade][rng] = [0, 0, 0] master[key]['TTL'] = {} for rng", "+= (data[i][j] / self.plots) master[spp][grade]['TGRD'][j - 2] += (data[i][j] / self.plots) master[spp]['TTL'][rng][j -", "2] += (data[i][j] / self.plots) master['TOTALS']['TTL'][rng][j - 2] += (data[i][j] / self.plots) master['TOTALS']['TTL']['TGRD'][j", "[tree.get_TPA(PLOT_FACTOR), tree.get_BA_acre(PLOT_FACTOR), tree.get_RD_acre(PLOT_FACTOR), tree.HGT, tree.HDR, 0, 0, 0]]) ## SUMMARY STATISTICS self.conditions_dict =", "grade in master[key]: count = 0 for rng in master[key][grade]: count += master[key][grade][rng][0]", "else: master[key][i] = master[key][i] / master[key][0] master[key].pop(0) master[key].insert(2, math.sqrt((master[key][1] / master[key][0]) / .005454))", "3, 7, 8] for i in range(1, len(master[key])): if i in sums: master[key][i]", "Min_Log): self.csv = CSV self.stand = Stand_to_Examine.upper() self.plots = Plots self.plog = Pref_Log", "3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] # After Pop SPPCOUNT and Add QMD", "= {} master['TOTALS'] = {} # Formatting Grades and Ranges in correct order,", "DBH = float(line[4]) HEIGHT = float(line[5]) HDR_LIST.append(HEIGHT / (DBH / 12)) AVG_HDR =", "ft\", 'TGRD'] master = {} # Formatting Species into main keys for spp", "(DBH / 12)) AVG_HDR = round(sum(HDR_LIST) / len(HDR_LIST), 2) return AVG_HDR, SPECIES_LIST def", "= tree.log_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) self.summary_conditions.append([single['SPP'][0], [tree_per_acre['TPA'], tree_per_acre['BA_AC'], tree_per_acre['RD_AC'], single['T_HGT'], single['HDR'], single['VBAR'], tree_per_acre['BF_AC'],", "tree.HGT, tree.HDR, 0, 0, 0]]) ## SUMMARY STATISTICS self.conditions_dict = self.get_conditions_dict() self.logs_dict =", "0, 0, 0, 0, 0, 0, 0] for data 
in self.summary_conditions: spp =", "After Pop SPPCOUNT and Add QMD to 2 index # ORDER OF FINAL", "SPECIES = str(line[3]).upper() DBH = float(line[4]) if line[5] == \"\": HEIGHT = int(round(AVG_HDR", "master[key][grade] = {} for rng in log_rng: master[key][grade][rng] = [0, 0, 0] master[key]['TTL']", "= [0, 0, 0] # Adding data to Master Dict for data in", "* (DBH/12),0)) else: HEIGHT = int(float(line[5])) PLOT_FACTOR = float(line[6]) if DBH >= 6.0:", ".005454)) return master def get_logs_dict(self): log_rng = [\"40+ ft\", \"31-40 ft\", \"21-30 ft\",", "0] for spp in self.species_list: master[spp] = [0, 0, 0, 0, 0, 0,", "master[key][i] / self.plots else: master[key][i] = master[key][i] / master[key][0] master[key].pop(0) master[key].insert(2, math.sqrt((master[key][1] /", "0, 0, 0, 0] for spp in self.species_list: master[spp] = [0, 0, 0,", "self.summary_conditions = [] self.summary_logs = [] self.conditions_dict = {} self.logs_dict = {} self.report()", "- 2] += (data[i][j] / self.plots) master['TOTALS'][grade][rng][j - 2] += (data[i][j] / self.plots)", "{} for rng in log_rng: master[key][grade][rng] = [0, 0, 0] master[key]['TTL'] = {}", "= [Log_Dict[key]['L_GRD'][0], rng, Log_Dict[key]['L_CT_AC'], Log_Dict[key]['L_BF_AC'], Log_Dict[key]['L_CF_AC']] master.append(temp_list) return master def get_conditions_dict(self): # ORDER", "Plots self.plog = Pref_Log self.mlog = Min_Log self.species_list = [] self.summary_conditions = []", "2QMD, 3RD_AC, 4T_HGT, 5HDR, 6VBAR, 7BF_AC, 8CF_AC] master = {} totals_temp = [0,", "log_per_acre = tree.log_acre(merch_dib, self.plog, self.mlog, PLOT_FACTOR) self.summary_conditions.append([single['SPP'][0], [tree_per_acre['TPA'], tree_per_acre['BA_AC'], tree_per_acre['RD_AC'], single['T_HGT'], single['HDR'], single['VBAR'],", "master[key]['TTL'][rng] = [0, 0, 0] # Adding data to Master Dict for data", "Stand_to_Examine, Plots, Pref_Log, Min_Log): self.csv = CSV self.stand = Stand_to_Examine.upper() self.plots = Plots", "!= self.stand: next 
else: SPECIES = str(line[3]).upper() DBH = float(line[4]) if line[5] ==", "Grades that have zero data ax_list = [] for key in master: for", "return AVG_HDR, SPECIES_LIST def get_log_list(self, Species, Log_Dict): master = [Species] for key in", "get_log_list(self, Species, Log_Dict): master = [Species] for key in Log_Dict: rng = \"\"", "self.stand = Stand_to_Examine.upper() self.plots = Plots self.plog = Pref_Log self.mlog = Min_Log self.species_list", "= float(line[4]) if line[5] == \"\": HEIGHT = int(round(AVG_HDR * (DBH/12),0)) else: HEIGHT", "self.plots) master['TOTALS'][grade][rng][j - 2] += (data[i][j] / self.plots) master['TOTALS'][grade]['TGRD'][j - 2] += (data[i][j]", "master[spp]['TTL'][rng][j - 2] += (data[i][j] / self.plots) master[spp]['TTL']['TGRD'][j - 2] += (data[i][j] /", "= Timber(SPECIES, DBH, HEIGHT) merch_dib = tree.merch_dib() if merch_dib < 5: merch_dib =" ]
[ "import Registrable class DataLoader(Registrable): default_implementation = 'simple' def __len__(self): raise TypeError def __iter__(self):", "__len__(self): raise TypeError def __iter__(self): raise NotImplementedError def iter_instances(self): raise NotImplementedError def index_with(self,", "NotImplementedError def iter_instances(self): raise NotImplementedError def index_with(self, vocab): raise NotImplementedError def set_target_device(self, device):", "raise NotImplementedError def iter_instances(self): raise NotImplementedError def index_with(self, vocab): raise NotImplementedError def set_target_device(self,", "-*- coding: utf-8 -*- from cnlp.common.registrable import Registrable class DataLoader(Registrable): default_implementation = 'simple'", "raise TypeError def __iter__(self): raise NotImplementedError def iter_instances(self): raise NotImplementedError def index_with(self, vocab):", "-*- from cnlp.common.registrable import Registrable class DataLoader(Registrable): default_implementation = 'simple' def __len__(self): raise", "def __len__(self): raise TypeError def __iter__(self): raise NotImplementedError def iter_instances(self): raise NotImplementedError def", "class DataLoader(Registrable): default_implementation = 'simple' def __len__(self): raise TypeError def __iter__(self): raise NotImplementedError", "coding: utf-8 -*- from cnlp.common.registrable import Registrable class DataLoader(Registrable): default_implementation = 'simple' def", "from cnlp.common.registrable import Registrable class DataLoader(Registrable): default_implementation = 'simple' def __len__(self): raise TypeError", "DataLoader(Registrable): default_implementation = 'simple' def __len__(self): raise TypeError def __iter__(self): raise NotImplementedError def", "def iter_instances(self): raise NotImplementedError def index_with(self, vocab): raise NotImplementedError def set_target_device(self, device): raise", "cnlp.common.registrable import Registrable class 
DataLoader(Registrable): default_implementation = 'simple' def __len__(self): raise TypeError def", "TypeError def __iter__(self): raise NotImplementedError def iter_instances(self): raise NotImplementedError def index_with(self, vocab): raise", "Registrable class DataLoader(Registrable): default_implementation = 'simple' def __len__(self): raise TypeError def __iter__(self): raise", "iter_instances(self): raise NotImplementedError def index_with(self, vocab): raise NotImplementedError def set_target_device(self, device): raise NotImplementedError", "default_implementation = 'simple' def __len__(self): raise TypeError def __iter__(self): raise NotImplementedError def iter_instances(self):", "__iter__(self): raise NotImplementedError def iter_instances(self): raise NotImplementedError def index_with(self, vocab): raise NotImplementedError def", "= 'simple' def __len__(self): raise TypeError def __iter__(self): raise NotImplementedError def iter_instances(self): raise", "def __iter__(self): raise NotImplementedError def iter_instances(self): raise NotImplementedError def index_with(self, vocab): raise NotImplementedError", "# -*- coding: utf-8 -*- from cnlp.common.registrable import Registrable class DataLoader(Registrable): default_implementation =", "'simple' def __len__(self): raise TypeError def __iter__(self): raise NotImplementedError def iter_instances(self): raise NotImplementedError", "utf-8 -*- from cnlp.common.registrable import Registrable class DataLoader(Registrable): default_implementation = 'simple' def __len__(self):" ]
[ "script_dir = path.dirname(path.abspath(__file__)) sys.path.append(path.normpath(path.join(script_dir, \"..\"))) import geomagio if __name__ == \"__main__\": # read", "sys.path.append(path.normpath(path.join(script_dir, \"..\"))) import geomagio if __name__ == \"__main__\": # read configuration from environment", "% (webservice_host, webservice_port)) app = geomagio.WebService(factory, version) httpd = make_server(webservice_host, webservice_port, app) httpd.serve_forever()", "from __future__ import absolute_import, print_function import os import sys from wsgiref.simple_server import make_server", "else: raise \"Unknown factory type '%s'\" % factory_type print(\"Starting webservice on %s:%d\" %", "<gh_stars>1-10 #! /usr/bin/env python from __future__ import absolute_import, print_function import os import sys", "path before importing try: import geomagio # noqa (tells linter to ignore this", "import geomagio # noqa (tells linter to ignore this line.) except ImportError: path", "raise \"Unknown factory type '%s'\" % factory_type print(\"Starting webservice on %s:%d\" % (webservice_host,", "int(os.getenv(\"EDGE_PORT\", \"2060\")) factory_type = os.getenv(\"GEOMAG_FACTORY_TYPE\", \"edge\") webservice_host = os.getenv(\"GEOMAG_WEBSERVICE_HOST\", \"\") webservice_port = int(os.getenv(\"GEOMAG_WEBSERVICE_PORT\",", "read configuration from environment edge_host = os.getenv(\"EDGE_HOST\", \"cwbpub.cr.usgs.gov\") edge_port = int(os.getenv(\"EDGE_PORT\", \"2060\")) factory_type", "webservice_port = int(os.getenv(\"GEOMAG_WEBSERVICE_PORT\", \"7981\")) version = os.getenv(\"GEOMAG_VERSION\", None) # configure factory if factory_type", "\"edge\": factory = geomagio.edge.EdgeFactory(host=edge_host, port=edge_port) else: raise \"Unknown factory type '%s'\" % factory_type", "import geomagio if __name__ == \"__main__\": # read configuration from environment edge_host =", "ImportError: path = os.path script_dir = path.dirname(path.abspath(__file__)) 
sys.path.append(path.normpath(path.join(script_dir, \"..\"))) import geomagio if __name__", "os import sys from wsgiref.simple_server import make_server # ensure geomag is on the", "= os.getenv(\"EDGE_HOST\", \"cwbpub.cr.usgs.gov\") edge_port = int(os.getenv(\"EDGE_PORT\", \"2060\")) factory_type = os.getenv(\"GEOMAG_FACTORY_TYPE\", \"edge\") webservice_host =", "#! /usr/bin/env python from __future__ import absolute_import, print_function import os import sys from", "webservice_host = os.getenv(\"GEOMAG_WEBSERVICE_HOST\", \"\") webservice_port = int(os.getenv(\"GEOMAG_WEBSERVICE_PORT\", \"7981\")) version = os.getenv(\"GEOMAG_VERSION\", None) #", "import absolute_import, print_function import os import sys from wsgiref.simple_server import make_server # ensure", "= path.dirname(path.abspath(__file__)) sys.path.append(path.normpath(path.join(script_dir, \"..\"))) import geomagio if __name__ == \"__main__\": # read configuration", "if __name__ == \"__main__\": # read configuration from environment edge_host = os.getenv(\"EDGE_HOST\", \"cwbpub.cr.usgs.gov\")", "__future__ import absolute_import, print_function import os import sys from wsgiref.simple_server import make_server #", "# configure factory if factory_type == \"edge\": factory = geomagio.edge.EdgeFactory(host=edge_host, port=edge_port) else: raise", "== \"edge\": factory = geomagio.edge.EdgeFactory(host=edge_host, port=edge_port) else: raise \"Unknown factory type '%s'\" %", "on the path before importing try: import geomagio # noqa (tells linter to", "python from __future__ import absolute_import, print_function import os import sys from wsgiref.simple_server import", "make_server # ensure geomag is on the path before importing try: import geomagio", "# read configuration from environment edge_host = os.getenv(\"EDGE_HOST\", \"cwbpub.cr.usgs.gov\") edge_port = int(os.getenv(\"EDGE_PORT\", \"2060\"))", "configuration from environment edge_host = os.getenv(\"EDGE_HOST\", \"cwbpub.cr.usgs.gov\") edge_port = 
int(os.getenv(\"EDGE_PORT\", \"2060\")) factory_type =", "factory_type = os.getenv(\"GEOMAG_FACTORY_TYPE\", \"edge\") webservice_host = os.getenv(\"GEOMAG_WEBSERVICE_HOST\", \"\") webservice_port = int(os.getenv(\"GEOMAG_WEBSERVICE_PORT\", \"7981\")) version", "if factory_type == \"edge\": factory = geomagio.edge.EdgeFactory(host=edge_host, port=edge_port) else: raise \"Unknown factory type", "os.getenv(\"GEOMAG_VERSION\", None) # configure factory if factory_type == \"edge\": factory = geomagio.edge.EdgeFactory(host=edge_host, port=edge_port)", "ensure geomag is on the path before importing try: import geomagio # noqa", "this line.) except ImportError: path = os.path script_dir = path.dirname(path.abspath(__file__)) sys.path.append(path.normpath(path.join(script_dir, \"..\"))) import", "geomagio.edge.EdgeFactory(host=edge_host, port=edge_port) else: raise \"Unknown factory type '%s'\" % factory_type print(\"Starting webservice on", "/usr/bin/env python from __future__ import absolute_import, print_function import os import sys from wsgiref.simple_server", "print_function import os import sys from wsgiref.simple_server import make_server # ensure geomag is", "% factory_type print(\"Starting webservice on %s:%d\" % (webservice_host, webservice_port)) app = geomagio.WebService(factory, version)", "factory_type == \"edge\": factory = geomagio.edge.EdgeFactory(host=edge_host, port=edge_port) else: raise \"Unknown factory type '%s'\"", "port=edge_port) else: raise \"Unknown factory type '%s'\" % factory_type print(\"Starting webservice on %s:%d\"", "path = os.path script_dir = path.dirname(path.abspath(__file__)) sys.path.append(path.normpath(path.join(script_dir, \"..\"))) import geomagio if __name__ ==", "os.getenv(\"GEOMAG_FACTORY_TYPE\", \"edge\") webservice_host = os.getenv(\"GEOMAG_WEBSERVICE_HOST\", \"\") webservice_port = int(os.getenv(\"GEOMAG_WEBSERVICE_PORT\", \"7981\")) version = os.getenv(\"GEOMAG_VERSION\",", "from wsgiref.simple_server import make_server 
# ensure geomag is on the path before importing", "os.getenv(\"GEOMAG_WEBSERVICE_HOST\", \"\") webservice_port = int(os.getenv(\"GEOMAG_WEBSERVICE_PORT\", \"7981\")) version = os.getenv(\"GEOMAG_VERSION\", None) # configure factory", "= int(os.getenv(\"EDGE_PORT\", \"2060\")) factory_type = os.getenv(\"GEOMAG_FACTORY_TYPE\", \"edge\") webservice_host = os.getenv(\"GEOMAG_WEBSERVICE_HOST\", \"\") webservice_port =", "before importing try: import geomagio # noqa (tells linter to ignore this line.)", "os.getenv(\"EDGE_HOST\", \"cwbpub.cr.usgs.gov\") edge_port = int(os.getenv(\"EDGE_PORT\", \"2060\")) factory_type = os.getenv(\"GEOMAG_FACTORY_TYPE\", \"edge\") webservice_host = os.getenv(\"GEOMAG_WEBSERVICE_HOST\",", "ignore this line.) except ImportError: path = os.path script_dir = path.dirname(path.abspath(__file__)) sys.path.append(path.normpath(path.join(script_dir, \"..\")))", "(tells linter to ignore this line.) except ImportError: path = os.path script_dir =", "= os.getenv(\"GEOMAG_FACTORY_TYPE\", \"edge\") webservice_host = os.getenv(\"GEOMAG_WEBSERVICE_HOST\", \"\") webservice_port = int(os.getenv(\"GEOMAG_WEBSERVICE_PORT\", \"7981\")) version =", "the path before importing try: import geomagio # noqa (tells linter to ignore", "sys from wsgiref.simple_server import make_server # ensure geomag is on the path before", "line.) except ImportError: path = os.path script_dir = path.dirname(path.abspath(__file__)) sys.path.append(path.normpath(path.join(script_dir, \"..\"))) import geomagio", "None) # configure factory if factory_type == \"edge\": factory = geomagio.edge.EdgeFactory(host=edge_host, port=edge_port) else:", "\"edge\") webservice_host = os.getenv(\"GEOMAG_WEBSERVICE_HOST\", \"\") webservice_port = int(os.getenv(\"GEOMAG_WEBSERVICE_PORT\", \"7981\")) version = os.getenv(\"GEOMAG_VERSION\", None)", "importing try: import geomagio # noqa (tells linter to ignore this line.) 
except", "edge_port = int(os.getenv(\"EDGE_PORT\", \"2060\")) factory_type = os.getenv(\"GEOMAG_FACTORY_TYPE\", \"edge\") webservice_host = os.getenv(\"GEOMAG_WEBSERVICE_HOST\", \"\") webservice_port", "\"2060\")) factory_type = os.getenv(\"GEOMAG_FACTORY_TYPE\", \"edge\") webservice_host = os.getenv(\"GEOMAG_WEBSERVICE_HOST\", \"\") webservice_port = int(os.getenv(\"GEOMAG_WEBSERVICE_PORT\", \"7981\"))", "\"7981\")) version = os.getenv(\"GEOMAG_VERSION\", None) # configure factory if factory_type == \"edge\": factory", "geomagio # noqa (tells linter to ignore this line.) except ImportError: path =", "factory type '%s'\" % factory_type print(\"Starting webservice on %s:%d\" % (webservice_host, webservice_port)) app", "to ignore this line.) except ImportError: path = os.path script_dir = path.dirname(path.abspath(__file__)) sys.path.append(path.normpath(path.join(script_dir,", "geomag is on the path before importing try: import geomagio # noqa (tells", "import make_server # ensure geomag is on the path before importing try: import", "is on the path before importing try: import geomagio # noqa (tells linter", "environment edge_host = os.getenv(\"EDGE_HOST\", \"cwbpub.cr.usgs.gov\") edge_port = int(os.getenv(\"EDGE_PORT\", \"2060\")) factory_type = os.getenv(\"GEOMAG_FACTORY_TYPE\", \"edge\")", "from environment edge_host = os.getenv(\"EDGE_HOST\", \"cwbpub.cr.usgs.gov\") edge_port = int(os.getenv(\"EDGE_PORT\", \"2060\")) factory_type = os.getenv(\"GEOMAG_FACTORY_TYPE\",", "version = os.getenv(\"GEOMAG_VERSION\", None) # configure factory if factory_type == \"edge\": factory =", "\"\") webservice_port = int(os.getenv(\"GEOMAG_WEBSERVICE_PORT\", \"7981\")) version = os.getenv(\"GEOMAG_VERSION\", None) # configure factory if", "'%s'\" % factory_type print(\"Starting webservice on %s:%d\" % (webservice_host, webservice_port)) app = geomagio.WebService(factory,", "try: import geomagio # noqa (tells linter to ignore this line.) 
except ImportError:", "\"Unknown factory type '%s'\" % factory_type print(\"Starting webservice on %s:%d\" % (webservice_host, webservice_port))", "linter to ignore this line.) except ImportError: path = os.path script_dir = path.dirname(path.abspath(__file__))", "edge_host = os.getenv(\"EDGE_HOST\", \"cwbpub.cr.usgs.gov\") edge_port = int(os.getenv(\"EDGE_PORT\", \"2060\")) factory_type = os.getenv(\"GEOMAG_FACTORY_TYPE\", \"edge\") webservice_host", "os.path script_dir = path.dirname(path.abspath(__file__)) sys.path.append(path.normpath(path.join(script_dir, \"..\"))) import geomagio if __name__ == \"__main__\": #", "geomagio if __name__ == \"__main__\": # read configuration from environment edge_host = os.getenv(\"EDGE_HOST\",", "== \"__main__\": # read configuration from environment edge_host = os.getenv(\"EDGE_HOST\", \"cwbpub.cr.usgs.gov\") edge_port =", "noqa (tells linter to ignore this line.) except ImportError: path = os.path script_dir", "\"..\"))) import geomagio if __name__ == \"__main__\": # read configuration from environment edge_host", "webservice on %s:%d\" % (webservice_host, webservice_port)) app = geomagio.WebService(factory, version) httpd = make_server(webservice_host,", "absolute_import, print_function import os import sys from wsgiref.simple_server import make_server # ensure geomag", "configure factory if factory_type == \"edge\": factory = geomagio.edge.EdgeFactory(host=edge_host, port=edge_port) else: raise \"Unknown", "import sys from wsgiref.simple_server import make_server # ensure geomag is on the path", "= os.getenv(\"GEOMAG_WEBSERVICE_HOST\", \"\") webservice_port = int(os.getenv(\"GEOMAG_WEBSERVICE_PORT\", \"7981\")) version = os.getenv(\"GEOMAG_VERSION\", None) # configure", "%s:%d\" % (webservice_host, webservice_port)) app = geomagio.WebService(factory, version) httpd = make_server(webservice_host, webservice_port, app)", "on %s:%d\" % (webservice_host, webservice_port)) app = geomagio.WebService(factory, version) httpd = 
make_server(webservice_host, webservice_port,", "int(os.getenv(\"GEOMAG_WEBSERVICE_PORT\", \"7981\")) version = os.getenv(\"GEOMAG_VERSION\", None) # configure factory if factory_type == \"edge\":", "= os.getenv(\"GEOMAG_VERSION\", None) # configure factory if factory_type == \"edge\": factory = geomagio.edge.EdgeFactory(host=edge_host,", "= geomagio.edge.EdgeFactory(host=edge_host, port=edge_port) else: raise \"Unknown factory type '%s'\" % factory_type print(\"Starting webservice", "factory = geomagio.edge.EdgeFactory(host=edge_host, port=edge_port) else: raise \"Unknown factory type '%s'\" % factory_type print(\"Starting", "path.dirname(path.abspath(__file__)) sys.path.append(path.normpath(path.join(script_dir, \"..\"))) import geomagio if __name__ == \"__main__\": # read configuration from", "factory_type print(\"Starting webservice on %s:%d\" % (webservice_host, webservice_port)) app = geomagio.WebService(factory, version) httpd", "import os import sys from wsgiref.simple_server import make_server # ensure geomag is on", "# ensure geomag is on the path before importing try: import geomagio #", "= int(os.getenv(\"GEOMAG_WEBSERVICE_PORT\", \"7981\")) version = os.getenv(\"GEOMAG_VERSION\", None) # configure factory if factory_type ==", "__name__ == \"__main__\": # read configuration from environment edge_host = os.getenv(\"EDGE_HOST\", \"cwbpub.cr.usgs.gov\") edge_port", "= os.path script_dir = path.dirname(path.abspath(__file__)) sys.path.append(path.normpath(path.join(script_dir, \"..\"))) import geomagio if __name__ == \"__main__\":", "except ImportError: path = os.path script_dir = path.dirname(path.abspath(__file__)) sys.path.append(path.normpath(path.join(script_dir, \"..\"))) import geomagio if", "\"__main__\": # read configuration from environment edge_host = os.getenv(\"EDGE_HOST\", \"cwbpub.cr.usgs.gov\") edge_port = int(os.getenv(\"EDGE_PORT\",", "type '%s'\" % factory_type print(\"Starting webservice on %s:%d\" % (webservice_host, webservice_port)) 
app =", "print(\"Starting webservice on %s:%d\" % (webservice_host, webservice_port)) app = geomagio.WebService(factory, version) httpd =", "wsgiref.simple_server import make_server # ensure geomag is on the path before importing try:", "# noqa (tells linter to ignore this line.) except ImportError: path = os.path", "factory if factory_type == \"edge\": factory = geomagio.edge.EdgeFactory(host=edge_host, port=edge_port) else: raise \"Unknown factory", "\"cwbpub.cr.usgs.gov\") edge_port = int(os.getenv(\"EDGE_PORT\", \"2060\")) factory_type = os.getenv(\"GEOMAG_FACTORY_TYPE\", \"edge\") webservice_host = os.getenv(\"GEOMAG_WEBSERVICE_HOST\", \"\")" ]
[ "COLON STRING\"\"\" p[0] = p[5] def p_null(p): \"\"\"null : N_SYMBOL\"\"\" p[0] = None", "RIGHT_BRACKET\"\"\" d = OrderedDict() expressions = p[5] for i, k in enumerate(expressions[::2]): d[k]", "p_object(p): \"\"\"object : O_SYMBOL COLON INTEGER COLON STRING raw_array\"\"\" p[0] = Object(p[5], dict(p[6]))", "p[5] for i, k in enumerate(expressions[::2]): d[k] = expressions[i * 2 + 1]", ": O_SYMBOL COLON INTEGER COLON STRING raw_array\"\"\" p[0] = Object(p[5], dict(p[6])) def eof():", "a:4:{s:4:\"date\";s:10:\"2019-12-29\";s:10:\"type_fonds\";s:11:\"arko_seriel\";s:4:\"ref1\";i:12;s:4:\"ref2\";i:4669;} from .models import Object start = 'expression' def p_expression(p): \"\"\"expression : atom", ": expression SEMICOLON\"\"\" p[0] = [p[1]] def p_array_expressions_array_expression_array_expressions(p): \"\"\"array_expressions : expression SEMICOLON array_expressions\"\"\"", "Object(p[5], dict(p[6])) def eof(): raise RuntimeError('EOF Reached') def p_error(p): if p is None:", ": S_SYMBOL COLON INTEGER COLON STRING\"\"\" p[0] = p[5] def p_null(p): \"\"\"null :", "= Object(p[5], dict(p[6])) def eof(): raise RuntimeError('EOF Reached') def p_error(p): if p is", "\"\"\"expression : atom | associative\"\"\" p[0] = p[1] def p_atom(p): \"\"\"atom : integer", "COLON INTEGER COLON STRING raw_array\"\"\" p[0] = Object(p[5], dict(p[6])) def eof(): raise RuntimeError('EOF", "def p_boolean(p): \"\"\"boolean : B_SYMBOL COLON INTEGER\"\"\" p[0] = p[3] != \"0\" def", "d[k] = expressions[i * 2 + 1] p[0] = d def p_array_expressions_array_expression(p): \"\"\"array_expressions", "p[0] = p[1] def p_atom(p): \"\"\"atom : integer | float | boolean |", "= OrderedDict() expressions = p[5] for i, k in enumerate(expressions[::2]): d[k] = expressions[i", "LEFT_BRACKET array_expressions RIGHT_BRACKET\"\"\" d = OrderedDict() expressions = p[5] for i, k in", "| string | null\"\"\" p[0] = p[1] def p_collection(p): \"\"\"associative : array |", "| associative\"\"\" p[0] = p[1] def p_atom(p): 
\"\"\"atom : integer | float |", "object\"\"\" p[0] = p[1] def p_integer(p): \"\"\"integer : I_SYMBOL COLON INTEGER\"\"\" p[0] =", "N_SYMBOL\"\"\" p[0] = None def p_array(p): \"\"\"array : A_SYMBOL raw_array\"\"\" p[0] = p[2]", "None def p_array(p): \"\"\"array : A_SYMBOL raw_array\"\"\" p[0] = p[2] def p_raw_array(p): \"\"\"raw_array", "INTEGER\"\"\" p[0] = int(p[3]) def p_float(p): \"\"\"float : D_SYMBOL COLON FLOAT\"\"\" p[0] =", "p_boolean(p): \"\"\"boolean : B_SYMBOL COLON INTEGER\"\"\" p[0] = p[3] != \"0\" def p_string(p):", "def p_integer(p): \"\"\"integer : I_SYMBOL COLON INTEGER\"\"\" p[0] = int(p[3]) def p_float(p): \"\"\"float", "def p_error(p): if p is None: eof() else: raise RuntimeError(str(p)) def parse(text): parser", "p_float(p): \"\"\"float : D_SYMBOL COLON FLOAT\"\"\" p[0] = float(p[3]) def p_boolean(p): \"\"\"boolean :", "\"\"\"boolean : B_SYMBOL COLON INTEGER\"\"\" p[0] = p[3] != \"0\" def p_string(p): \"\"\"string", "d = OrderedDict() expressions = p[5] for i, k in enumerate(expressions[::2]): d[k] =", "INTEGER\"\"\" p[0] = p[3] != \"0\" def p_string(p): \"\"\"string : S_SYMBOL COLON INTEGER", "STRING\"\"\" p[0] = p[5] def p_null(p): \"\"\"null : N_SYMBOL\"\"\" p[0] = None def", "UTF-8 -*- import ply.yacc from collections import OrderedDict from .lexer import tokens, lex", "import ply.yacc from collections import OrderedDict from .lexer import tokens, lex # a:4:{s:4:\"date\";s:10:\"2019-12-29\";s:10:\"type_fonds\";s:11:\"arko_seriel\";s:4:\"ref1\";i:12;s:4:\"ref2\";i:4669;}", ".models import Object start = 'expression' def p_expression(p): \"\"\"expression : atom | associative\"\"\"", "\"\"\"associative : array | object\"\"\" p[0] = p[1] def p_integer(p): \"\"\"integer : I_SYMBOL", "+ 1] p[0] = d def p_array_expressions_array_expression(p): \"\"\"array_expressions : expression SEMICOLON\"\"\" p[0] =", "expression SEMICOLON\"\"\" p[0] = [p[1]] def p_array_expressions_array_expression_array_expressions(p): \"\"\"array_expressions : expression 
SEMICOLON array_expressions\"\"\" p[0]", "ply.yacc from collections import OrderedDict from .lexer import tokens, lex # a:4:{s:4:\"date\";s:10:\"2019-12-29\";s:10:\"type_fonds\";s:11:\"arko_seriel\";s:4:\"ref1\";i:12;s:4:\"ref2\";i:4669;} from", "associative\"\"\" p[0] = p[1] def p_atom(p): \"\"\"atom : integer | float | boolean", "p[3] != \"0\" def p_string(p): \"\"\"string : S_SYMBOL COLON INTEGER COLON STRING\"\"\" p[0]", "p_string(p): \"\"\"string : S_SYMBOL COLON INTEGER COLON STRING\"\"\" p[0] = p[5] def p_null(p):", ": COLON INTEGER COLON LEFT_BRACKET array_expressions RIGHT_BRACKET\"\"\" d = OrderedDict() expressions = p[5]", "else: raise RuntimeError(str(p)) def parse(text): parser = ply.yacc.yacc() expression = parser.parse(text, lexer=lex()) return", "COLON STRING raw_array\"\"\" p[0] = Object(p[5], dict(p[6])) def eof(): raise RuntimeError('EOF Reached') def", "\"\"\"integer : I_SYMBOL COLON INTEGER\"\"\" p[0] = int(p[3]) def p_float(p): \"\"\"float : D_SYMBOL", "for i, k in enumerate(expressions[::2]): d[k] = expressions[i * 2 + 1] p[0]", "# a:4:{s:4:\"date\";s:10:\"2019-12-29\";s:10:\"type_fonds\";s:11:\"arko_seriel\";s:4:\"ref1\";i:12;s:4:\"ref2\";i:4669;} from .models import Object start = 'expression' def p_expression(p): \"\"\"expression :", "def p_array_expressions_array_expression_array_expressions(p): \"\"\"array_expressions : expression SEMICOLON array_expressions\"\"\" p[0] = [p[1]] + p[3] def", "RuntimeError('EOF Reached') def p_error(p): if p is None: eof() else: raise RuntimeError(str(p)) def", "array_expressions RIGHT_BRACKET\"\"\" d = OrderedDict() expressions = p[5] for i, k in enumerate(expressions[::2]):", "COLON LEFT_BRACKET array_expressions RIGHT_BRACKET\"\"\" d = OrderedDict() expressions = p[5] for i, k", "= float(p[3]) def p_boolean(p): \"\"\"boolean : B_SYMBOL COLON INTEGER\"\"\" p[0] = p[3] !=", "p[5] def p_null(p): \"\"\"null : N_SYMBOL\"\"\" p[0] = None def p_array(p): \"\"\"array :", "SEMICOLON array_expressions\"\"\" p[0] 
= [p[1]] + p[3] def p_object(p): \"\"\"object : O_SYMBOL COLON", "def p_collection(p): \"\"\"associative : array | object\"\"\" p[0] = p[1] def p_integer(p): \"\"\"integer", "p[1] def p_atom(p): \"\"\"atom : integer | float | boolean | string |", "p[2] def p_raw_array(p): \"\"\"raw_array : COLON INTEGER COLON LEFT_BRACKET array_expressions RIGHT_BRACKET\"\"\" d =", "COLON FLOAT\"\"\" p[0] = float(p[3]) def p_boolean(p): \"\"\"boolean : B_SYMBOL COLON INTEGER\"\"\" p[0]", "float | boolean | string | null\"\"\" p[0] = p[1] def p_collection(p): \"\"\"associative", "S_SYMBOL COLON INTEGER COLON STRING\"\"\" p[0] = p[5] def p_null(p): \"\"\"null : N_SYMBOL\"\"\"", "p[0] = p[1] def p_collection(p): \"\"\"associative : array | object\"\"\" p[0] = p[1]", "integer | float | boolean | string | null\"\"\" p[0] = p[1] def", "\"\"\"string : S_SYMBOL COLON INTEGER COLON STRING\"\"\" p[0] = p[5] def p_null(p): \"\"\"null", "COLON INTEGER\"\"\" p[0] = p[3] != \"0\" def p_string(p): \"\"\"string : S_SYMBOL COLON", "= d def p_array_expressions_array_expression(p): \"\"\"array_expressions : expression SEMICOLON\"\"\" p[0] = [p[1]] def p_array_expressions_array_expression_array_expressions(p):", "Reached') def p_error(p): if p is None: eof() else: raise RuntimeError(str(p)) def parse(text):", "p_integer(p): \"\"\"integer : I_SYMBOL COLON INTEGER\"\"\" p[0] = int(p[3]) def p_float(p): \"\"\"float :", "boolean | string | null\"\"\" p[0] = p[1] def p_collection(p): \"\"\"associative : array", "COLON INTEGER COLON STRING\"\"\" p[0] = p[5] def p_null(p): \"\"\"null : N_SYMBOL\"\"\" p[0]", ".lexer import tokens, lex # a:4:{s:4:\"date\";s:10:\"2019-12-29\";s:10:\"type_fonds\";s:11:\"arko_seriel\";s:4:\"ref1\";i:12;s:4:\"ref2\";i:4669;} from .models import Object start = 'expression'", "FLOAT\"\"\" p[0] = float(p[3]) def p_boolean(p): \"\"\"boolean : B_SYMBOL COLON INTEGER\"\"\" p[0] =", "\"\"\"atom : integer | float | boolean | string | null\"\"\" p[0] =", "def p_string(p): \"\"\"string : 
S_SYMBOL COLON INTEGER COLON STRING\"\"\" p[0] = p[5] def", "i, k in enumerate(expressions[::2]): d[k] = expressions[i * 2 + 1] p[0] =", "D_SYMBOL COLON FLOAT\"\"\" p[0] = float(p[3]) def p_boolean(p): \"\"\"boolean : B_SYMBOL COLON INTEGER\"\"\"", "p[0] = d def p_array_expressions_array_expression(p): \"\"\"array_expressions : expression SEMICOLON\"\"\" p[0] = [p[1]] def", "| boolean | string | null\"\"\" p[0] = p[1] def p_collection(p): \"\"\"associative :", "2 + 1] p[0] = d def p_array_expressions_array_expression(p): \"\"\"array_expressions : expression SEMICOLON\"\"\" p[0]", "= 'expression' def p_expression(p): \"\"\"expression : atom | associative\"\"\" p[0] = p[1] def", "+ p[3] def p_object(p): \"\"\"object : O_SYMBOL COLON INTEGER COLON STRING raw_array\"\"\" p[0]", "p[0] = int(p[3]) def p_float(p): \"\"\"float : D_SYMBOL COLON FLOAT\"\"\" p[0] = float(p[3])", "-*- import ply.yacc from collections import OrderedDict from .lexer import tokens, lex #", "STRING raw_array\"\"\" p[0] = Object(p[5], dict(p[6])) def eof(): raise RuntimeError('EOF Reached') def p_error(p):", "= p[2] def p_raw_array(p): \"\"\"raw_array : COLON INTEGER COLON LEFT_BRACKET array_expressions RIGHT_BRACKET\"\"\" d", "expressions[i * 2 + 1] p[0] = d def p_array_expressions_array_expression(p): \"\"\"array_expressions : expression", "p_collection(p): \"\"\"associative : array | object\"\"\" p[0] = p[1] def p_integer(p): \"\"\"integer :", "= [p[1]] + p[3] def p_object(p): \"\"\"object : O_SYMBOL COLON INTEGER COLON STRING", ": atom | associative\"\"\" p[0] = p[1] def p_atom(p): \"\"\"atom : integer |", "\"\"\"array_expressions : expression SEMICOLON\"\"\" p[0] = [p[1]] def p_array_expressions_array_expression_array_expressions(p): \"\"\"array_expressions : expression SEMICOLON", "1] p[0] = d def p_array_expressions_array_expression(p): \"\"\"array_expressions : expression SEMICOLON\"\"\" p[0] = [p[1]]", "\"\"\"array_expressions : expression SEMICOLON array_expressions\"\"\" p[0] = [p[1]] + 
p[3] def p_object(p): \"\"\"object", "eof() else: raise RuntimeError(str(p)) def parse(text): parser = ply.yacc.yacc() expression = parser.parse(text, lexer=lex())", "p[0] = [p[1]] def p_array_expressions_array_expression_array_expressions(p): \"\"\"array_expressions : expression SEMICOLON array_expressions\"\"\" p[0] = [p[1]]", "[p[1]] def p_array_expressions_array_expression_array_expressions(p): \"\"\"array_expressions : expression SEMICOLON array_expressions\"\"\" p[0] = [p[1]] + p[3]", "\"\"\"float : D_SYMBOL COLON FLOAT\"\"\" p[0] = float(p[3]) def p_boolean(p): \"\"\"boolean : B_SYMBOL", ": B_SYMBOL COLON INTEGER\"\"\" p[0] = p[3] != \"0\" def p_string(p): \"\"\"string :", "in enumerate(expressions[::2]): d[k] = expressions[i * 2 + 1] p[0] = d def", "is None: eof() else: raise RuntimeError(str(p)) def parse(text): parser = ply.yacc.yacc() expression =", "p is None: eof() else: raise RuntimeError(str(p)) def parse(text): parser = ply.yacc.yacc() expression", "p[0] = Object(p[5], dict(p[6])) def eof(): raise RuntimeError('EOF Reached') def p_error(p): if p", "eof(): raise RuntimeError('EOF Reached') def p_error(p): if p is None: eof() else: raise", ": I_SYMBOL COLON INTEGER\"\"\" p[0] = int(p[3]) def p_float(p): \"\"\"float : D_SYMBOL COLON", "coding: UTF-8 -*- import ply.yacc from collections import OrderedDict from .lexer import tokens,", "p_null(p): \"\"\"null : N_SYMBOL\"\"\" p[0] = None def p_array(p): \"\"\"array : A_SYMBOL raw_array\"\"\"", "raise RuntimeError('EOF Reached') def p_error(p): if p is None: eof() else: raise RuntimeError(str(p))", "def p_object(p): \"\"\"object : O_SYMBOL COLON INTEGER COLON STRING raw_array\"\"\" p[0] = Object(p[5],", "if p is None: eof() else: raise RuntimeError(str(p)) def parse(text): parser = ply.yacc.yacc()", "p[0] = None def p_array(p): \"\"\"array : A_SYMBOL raw_array\"\"\" p[0] = p[2] def", "* 2 + 1] p[0] = d def p_array_expressions_array_expression(p): \"\"\"array_expressions : expression SEMICOLON\"\"\"", 
"p_atom(p): \"\"\"atom : integer | float | boolean | string | null\"\"\" p[0]", "p_array_expressions_array_expression_array_expressions(p): \"\"\"array_expressions : expression SEMICOLON array_expressions\"\"\" p[0] = [p[1]] + p[3] def p_object(p):", "import tokens, lex # a:4:{s:4:\"date\";s:10:\"2019-12-29\";s:10:\"type_fonds\";s:11:\"arko_seriel\";s:4:\"ref1\";i:12;s:4:\"ref2\";i:4669;} from .models import Object start = 'expression' def", "!= \"0\" def p_string(p): \"\"\"string : S_SYMBOL COLON INTEGER COLON STRING\"\"\" p[0] =", "\"\"\"object : O_SYMBOL COLON INTEGER COLON STRING raw_array\"\"\" p[0] = Object(p[5], dict(p[6])) def", "= p[5] def p_null(p): \"\"\"null : N_SYMBOL\"\"\" p[0] = None def p_array(p): \"\"\"array", "| object\"\"\" p[0] = p[1] def p_integer(p): \"\"\"integer : I_SYMBOL COLON INTEGER\"\"\" p[0]", "p[1] def p_collection(p): \"\"\"associative : array | object\"\"\" p[0] = p[1] def p_integer(p):", "INTEGER COLON STRING raw_array\"\"\" p[0] = Object(p[5], dict(p[6])) def eof(): raise RuntimeError('EOF Reached')", "k in enumerate(expressions[::2]): d[k] = expressions[i * 2 + 1] p[0] = d", ": D_SYMBOL COLON FLOAT\"\"\" p[0] = float(p[3]) def p_boolean(p): \"\"\"boolean : B_SYMBOL COLON", "= p[5] for i, k in enumerate(expressions[::2]): d[k] = expressions[i * 2 +", "def eof(): raise RuntimeError('EOF Reached') def p_error(p): if p is None: eof() else:", "p_expression(p): \"\"\"expression : atom | associative\"\"\" p[0] = p[1] def p_atom(p): \"\"\"atom :", "# -*- coding: UTF-8 -*- import ply.yacc from collections import OrderedDict from .lexer", "'expression' def p_expression(p): \"\"\"expression : atom | associative\"\"\" p[0] = p[1] def p_atom(p):", "\"0\" def p_string(p): \"\"\"string : S_SYMBOL COLON INTEGER COLON STRING\"\"\" p[0] = p[5]", "p[0] = p[3] != \"0\" def p_string(p): \"\"\"string : S_SYMBOL COLON INTEGER COLON", "= int(p[3]) def p_float(p): \"\"\"float : D_SYMBOL COLON FLOAT\"\"\" p[0] = float(p[3]) def", "p_array(p): \"\"\"array 
: A_SYMBOL raw_array\"\"\" p[0] = p[2] def p_raw_array(p): \"\"\"raw_array : COLON", "expressions = p[5] for i, k in enumerate(expressions[::2]): d[k] = expressions[i * 2", "= p[1] def p_integer(p): \"\"\"integer : I_SYMBOL COLON INTEGER\"\"\" p[0] = int(p[3]) def", "| null\"\"\" p[0] = p[1] def p_collection(p): \"\"\"associative : array | object\"\"\" p[0]", "\"\"\"array : A_SYMBOL raw_array\"\"\" p[0] = p[2] def p_raw_array(p): \"\"\"raw_array : COLON INTEGER", "= p[1] def p_atom(p): \"\"\"atom : integer | float | boolean | string", "dict(p[6])) def eof(): raise RuntimeError('EOF Reached') def p_error(p): if p is None: eof()", "OrderedDict() expressions = p[5] for i, k in enumerate(expressions[::2]): d[k] = expressions[i *", "import Object start = 'expression' def p_expression(p): \"\"\"expression : atom | associative\"\"\" p[0]", "= p[3] != \"0\" def p_string(p): \"\"\"string : S_SYMBOL COLON INTEGER COLON STRING\"\"\"", "O_SYMBOL COLON INTEGER COLON STRING raw_array\"\"\" p[0] = Object(p[5], dict(p[6])) def eof(): raise", "-*- coding: UTF-8 -*- import ply.yacc from collections import OrderedDict from .lexer import", "def p_expression(p): \"\"\"expression : atom | associative\"\"\" p[0] = p[1] def p_atom(p): \"\"\"atom", "string | null\"\"\" p[0] = p[1] def p_collection(p): \"\"\"associative : array | object\"\"\"", "p[0] = p[1] def p_integer(p): \"\"\"integer : I_SYMBOL COLON INTEGER\"\"\" p[0] = int(p[3])", "B_SYMBOL COLON INTEGER\"\"\" p[0] = p[3] != \"0\" def p_string(p): \"\"\"string : S_SYMBOL", "tokens, lex # a:4:{s:4:\"date\";s:10:\"2019-12-29\";s:10:\"type_fonds\";s:11:\"arko_seriel\";s:4:\"ref1\";i:12;s:4:\"ref2\";i:4669;} from .models import Object start = 'expression' def p_expression(p):", "A_SYMBOL raw_array\"\"\" p[0] = p[2] def p_raw_array(p): \"\"\"raw_array : COLON INTEGER COLON LEFT_BRACKET", "p[3] def p_object(p): \"\"\"object : O_SYMBOL COLON INTEGER COLON STRING raw_array\"\"\" p[0] =", "COLON INTEGER\"\"\" p[0] = int(p[3]) def p_float(p): 
\"\"\"float : D_SYMBOL COLON FLOAT\"\"\" p[0]", "None: eof() else: raise RuntimeError(str(p)) def parse(text): parser = ply.yacc.yacc() expression = parser.parse(text,", "d def p_array_expressions_array_expression(p): \"\"\"array_expressions : expression SEMICOLON\"\"\" p[0] = [p[1]] def p_array_expressions_array_expression_array_expressions(p): \"\"\"array_expressions", "= p[1] def p_collection(p): \"\"\"associative : array | object\"\"\" p[0] = p[1] def", ": array | object\"\"\" p[0] = p[1] def p_integer(p): \"\"\"integer : I_SYMBOL COLON", "p[0] = p[2] def p_raw_array(p): \"\"\"raw_array : COLON INTEGER COLON LEFT_BRACKET array_expressions RIGHT_BRACKET\"\"\"", "lex # a:4:{s:4:\"date\";s:10:\"2019-12-29\";s:10:\"type_fonds\";s:11:\"arko_seriel\";s:4:\"ref1\";i:12;s:4:\"ref2\";i:4669;} from .models import Object start = 'expression' def p_expression(p): \"\"\"expression", "def p_float(p): \"\"\"float : D_SYMBOL COLON FLOAT\"\"\" p[0] = float(p[3]) def p_boolean(p): \"\"\"boolean", "from collections import OrderedDict from .lexer import tokens, lex # a:4:{s:4:\"date\";s:10:\"2019-12-29\";s:10:\"type_fonds\";s:11:\"arko_seriel\";s:4:\"ref1\";i:12;s:4:\"ref2\";i:4669;} from .models", "OrderedDict from .lexer import tokens, lex # a:4:{s:4:\"date\";s:10:\"2019-12-29\";s:10:\"type_fonds\";s:11:\"arko_seriel\";s:4:\"ref1\";i:12;s:4:\"ref2\";i:4669;} from .models import Object start", "start = 'expression' def p_expression(p): \"\"\"expression : atom | associative\"\"\" p[0] = p[1]", "array | object\"\"\" p[0] = p[1] def p_integer(p): \"\"\"integer : I_SYMBOL COLON INTEGER\"\"\"", "int(p[3]) def p_float(p): \"\"\"float : D_SYMBOL COLON FLOAT\"\"\" p[0] = float(p[3]) def p_boolean(p):", "= [p[1]] def p_array_expressions_array_expression_array_expressions(p): \"\"\"array_expressions : expression SEMICOLON array_expressions\"\"\" p[0] = [p[1]] +", "INTEGER COLON LEFT_BRACKET array_expressions RIGHT_BRACKET\"\"\" d = OrderedDict() expressions = p[5] for i,", "def 
p_raw_array(p): \"\"\"raw_array : COLON INTEGER COLON LEFT_BRACKET array_expressions RIGHT_BRACKET\"\"\" d = OrderedDict()", "raise RuntimeError(str(p)) def parse(text): parser = ply.yacc.yacc() expression = parser.parse(text, lexer=lex()) return expression", "def p_atom(p): \"\"\"atom : integer | float | boolean | string | null\"\"\"", "INTEGER COLON STRING\"\"\" p[0] = p[5] def p_null(p): \"\"\"null : N_SYMBOL\"\"\" p[0] =", "float(p[3]) def p_boolean(p): \"\"\"boolean : B_SYMBOL COLON INTEGER\"\"\" p[0] = p[3] != \"0\"", "\"\"\"null : N_SYMBOL\"\"\" p[0] = None def p_array(p): \"\"\"array : A_SYMBOL raw_array\"\"\" p[0]", "enumerate(expressions[::2]): d[k] = expressions[i * 2 + 1] p[0] = d def p_array_expressions_array_expression(p):", "array_expressions\"\"\" p[0] = [p[1]] + p[3] def p_object(p): \"\"\"object : O_SYMBOL COLON INTEGER", "SEMICOLON\"\"\" p[0] = [p[1]] def p_array_expressions_array_expression_array_expressions(p): \"\"\"array_expressions : expression SEMICOLON array_expressions\"\"\" p[0] =", "p[0] = float(p[3]) def p_boolean(p): \"\"\"boolean : B_SYMBOL COLON INTEGER\"\"\" p[0] = p[3]", "p[0] = [p[1]] + p[3] def p_object(p): \"\"\"object : O_SYMBOL COLON INTEGER COLON", "Object start = 'expression' def p_expression(p): \"\"\"expression : atom | associative\"\"\" p[0] =", "def p_array_expressions_array_expression(p): \"\"\"array_expressions : expression SEMICOLON\"\"\" p[0] = [p[1]] def p_array_expressions_array_expression_array_expressions(p): \"\"\"array_expressions :", "\"\"\"raw_array : COLON INTEGER COLON LEFT_BRACKET array_expressions RIGHT_BRACKET\"\"\" d = OrderedDict() expressions =", "null\"\"\" p[0] = p[1] def p_collection(p): \"\"\"associative : array | object\"\"\" p[0] =", "collections import OrderedDict from .lexer import tokens, lex # a:4:{s:4:\"date\";s:10:\"2019-12-29\";s:10:\"type_fonds\";s:11:\"arko_seriel\";s:4:\"ref1\";i:12;s:4:\"ref2\";i:4669;} from .models import", "raw_array\"\"\" p[0] = p[2] def p_raw_array(p): 
\"\"\"raw_array : COLON INTEGER COLON LEFT_BRACKET array_expressions", ": A_SYMBOL raw_array\"\"\" p[0] = p[2] def p_raw_array(p): \"\"\"raw_array : COLON INTEGER COLON", "p_raw_array(p): \"\"\"raw_array : COLON INTEGER COLON LEFT_BRACKET array_expressions RIGHT_BRACKET\"\"\" d = OrderedDict() expressions", "= expressions[i * 2 + 1] p[0] = d def p_array_expressions_array_expression(p): \"\"\"array_expressions :", "| float | boolean | string | null\"\"\" p[0] = p[1] def p_collection(p):", "from .lexer import tokens, lex # a:4:{s:4:\"date\";s:10:\"2019-12-29\";s:10:\"type_fonds\";s:11:\"arko_seriel\";s:4:\"ref1\";i:12;s:4:\"ref2\";i:4669;} from .models import Object start =", "I_SYMBOL COLON INTEGER\"\"\" p[0] = int(p[3]) def p_float(p): \"\"\"float : D_SYMBOL COLON FLOAT\"\"\"", "import OrderedDict from .lexer import tokens, lex # a:4:{s:4:\"date\";s:10:\"2019-12-29\";s:10:\"type_fonds\";s:11:\"arko_seriel\";s:4:\"ref1\";i:12;s:4:\"ref2\";i:4669;} from .models import Object", ": expression SEMICOLON array_expressions\"\"\" p[0] = [p[1]] + p[3] def p_object(p): \"\"\"object :", "raw_array\"\"\" p[0] = Object(p[5], dict(p[6])) def eof(): raise RuntimeError('EOF Reached') def p_error(p): if", "from .models import Object start = 'expression' def p_expression(p): \"\"\"expression : atom |", ": integer | float | boolean | string | null\"\"\" p[0] = p[1]", "p[0] = p[5] def p_null(p): \"\"\"null : N_SYMBOL\"\"\" p[0] = None def p_array(p):", "def p_array(p): \"\"\"array : A_SYMBOL raw_array\"\"\" p[0] = p[2] def p_raw_array(p): \"\"\"raw_array :", "p_error(p): if p is None: eof() else: raise RuntimeError(str(p)) def parse(text): parser =", "COLON INTEGER COLON LEFT_BRACKET array_expressions RIGHT_BRACKET\"\"\" d = OrderedDict() expressions = p[5] for", "atom | associative\"\"\" p[0] = p[1] def p_atom(p): \"\"\"atom : integer | float", ": N_SYMBOL\"\"\" p[0] = None def p_array(p): \"\"\"array : A_SYMBOL raw_array\"\"\" p[0] =", "p_array_expressions_array_expression(p): 
\"\"\"array_expressions : expression SEMICOLON\"\"\" p[0] = [p[1]] def p_array_expressions_array_expression_array_expressions(p): \"\"\"array_expressions : expression", "= None def p_array(p): \"\"\"array : A_SYMBOL raw_array\"\"\" p[0] = p[2] def p_raw_array(p):", "[p[1]] + p[3] def p_object(p): \"\"\"object : O_SYMBOL COLON INTEGER COLON STRING raw_array\"\"\"", "expression SEMICOLON array_expressions\"\"\" p[0] = [p[1]] + p[3] def p_object(p): \"\"\"object : O_SYMBOL", "p[1] def p_integer(p): \"\"\"integer : I_SYMBOL COLON INTEGER\"\"\" p[0] = int(p[3]) def p_float(p):", "def p_null(p): \"\"\"null : N_SYMBOL\"\"\" p[0] = None def p_array(p): \"\"\"array : A_SYMBOL" ]
[ "search is not None: del kwargs['search'] url += f\"?$filter=startswith(displayName,'{search}')\" return self.get_paged(url, **kwargs) @memoized", "del kwargs['search'] url += f\"?$filter=startswith(displayName,'{search}')\" return self.get_paged(url, **kwargs) @memoized def get_group(self, group_id): '''returns", "f'/groups' search = kwargs.get('search') if search is not None: del kwargs['search'] url +=", "def list_group_owners(self, group_id): return self.get_paged(f'/groups/{group_id}/owners') def get_group_drive(self, group_id): return self.get(f'/groups/{group_id}/drive') def get_group_drives(self, group_id):", "a group''' return self.get(f'/groups/{group_id}') def list_group_members(self, group_id): '''returns directoryObjects''' return self.get_paged(f'/groups/{group_id}/members') def get_directoryobject(self,", "self.get(f'/directoryObjects/{object_id}') def list_group_owners(self, group_id): return self.get_paged(f'/groups/{group_id}/owners') def get_group_drive(self, group_id): return self.get(f'/groups/{group_id}/drive') def get_group_drives(self,", "f\"?$filter=startswith(displayName,'{search}')\" return self.get_paged(url, **kwargs) @memoized def get_group(self, group_id): '''returns a group''' return self.get(f'/groups/{group_id}')", "list_group_owners(self, group_id): return self.get_paged(f'/groups/{group_id}/owners') def get_group_drive(self, group_id): return self.get(f'/groups/{group_id}/drive') def get_group_drives(self, group_id): return", "None: del kwargs['search'] url += f\"?$filter=startswith(displayName,'{search}')\" return self.get_paged(url, **kwargs) @memoized def get_group(self, group_id):", "class MgraphConnectorGroupMixin: def list_groups(self, **kwargs): '''https://docs.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0''' url = f'/groups' search = kwargs.get('search') if", "is not None: del kwargs['search'] url += f\"?$filter=startswith(displayName,'{search}')\" return self.get_paged(url, **kwargs) @memoized def", 
"+= f\"?$filter=startswith(displayName,'{search}')\" return self.get_paged(url, **kwargs) @memoized def get_group(self, group_id): '''returns a group''' return", "def get_group(self, group_id): '''returns a group''' return self.get(f'/groups/{group_id}') def list_group_members(self, group_id): '''returns directoryObjects'''", "group''' return self.get(f'/groups/{group_id}') def list_group_members(self, group_id): '''returns directoryObjects''' return self.get_paged(f'/groups/{group_id}/members') def get_directoryobject(self, object_id):", "list_group_members(self, group_id): '''returns directoryObjects''' return self.get_paged(f'/groups/{group_id}/members') def get_directoryobject(self, object_id): return self.get(f'/directoryObjects/{object_id}') def list_group_owners(self,", "object_id): return self.get(f'/directoryObjects/{object_id}') def list_group_owners(self, group_id): return self.get_paged(f'/groups/{group_id}/owners') def get_group_drive(self, group_id): return self.get(f'/groups/{group_id}/drive')", "def list_group_members(self, group_id): '''returns directoryObjects''' return self.get_paged(f'/groups/{group_id}/members') def get_directoryobject(self, object_id): return self.get(f'/directoryObjects/{object_id}') def", "self.get(f'/groups/{group_id}') def list_group_members(self, group_id): '''returns directoryObjects''' return self.get_paged(f'/groups/{group_id}/members') def get_directoryobject(self, object_id): return self.get(f'/directoryObjects/{object_id}')", "'''https://docs.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0''' url = f'/groups' search = kwargs.get('search') if search is not None: del", "'''returns a group''' return self.get(f'/groups/{group_id}') def list_group_members(self, group_id): '''returns directoryObjects''' return self.get_paged(f'/groups/{group_id}/members') def", "if search is not None: del kwargs['search'] url += f\"?$filter=startswith(displayName,'{search}')\" return self.get_paged(url, **kwargs)", "= 
kwargs.get('search') if search is not None: del kwargs['search'] url += f\"?$filter=startswith(displayName,'{search}')\" return", "'''returns directoryObjects''' return self.get_paged(f'/groups/{group_id}/members') def get_directoryobject(self, object_id): return self.get(f'/directoryObjects/{object_id}') def list_group_owners(self, group_id): return", "url += f\"?$filter=startswith(displayName,'{search}')\" return self.get_paged(url, **kwargs) @memoized def get_group(self, group_id): '''returns a group'''", "return self.get_paged(url, **kwargs) @memoized def get_group(self, group_id): '''returns a group''' return self.get(f'/groups/{group_id}') def", "memoized class MgraphConnectorGroupMixin: def list_groups(self, **kwargs): '''https://docs.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0''' url = f'/groups' search = kwargs.get('search')", "from .cache import memoized class MgraphConnectorGroupMixin: def list_groups(self, **kwargs): '''https://docs.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0''' url = f'/groups'", "= f'/groups' search = kwargs.get('search') if search is not None: del kwargs['search'] url", "@memoized def get_group(self, group_id): '''returns a group''' return self.get(f'/groups/{group_id}') def list_group_members(self, group_id): '''returns", "MgraphConnectorGroupMixin: def list_groups(self, **kwargs): '''https://docs.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0''' url = f'/groups' search = kwargs.get('search') if search", "**kwargs): '''https://docs.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0''' url = f'/groups' search = kwargs.get('search') if search is not None:", "kwargs.get('search') if search is not None: del kwargs['search'] url += f\"?$filter=startswith(displayName,'{search}')\" return self.get_paged(url,", "directoryObjects''' return self.get_paged(f'/groups/{group_id}/members') def get_directoryobject(self, object_id): return 
self.get(f'/directoryObjects/{object_id}') def list_group_owners(self, group_id): return self.get_paged(f'/groups/{group_id}/owners')", "not None: del kwargs['search'] url += f\"?$filter=startswith(displayName,'{search}')\" return self.get_paged(url, **kwargs) @memoized def get_group(self,", "list_groups(self, **kwargs): '''https://docs.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0''' url = f'/groups' search = kwargs.get('search') if search is not", "return self.get(f'/groups/{group_id}') def list_group_members(self, group_id): '''returns directoryObjects''' return self.get_paged(f'/groups/{group_id}/members') def get_directoryobject(self, object_id): return", ".cache import memoized class MgraphConnectorGroupMixin: def list_groups(self, **kwargs): '''https://docs.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0''' url = f'/groups' search", "url = f'/groups' search = kwargs.get('search') if search is not None: del kwargs['search']", "self.get_paged(url, **kwargs) @memoized def get_group(self, group_id): '''returns a group''' return self.get(f'/groups/{group_id}') def list_group_members(self,", "group_id): '''returns a group''' return self.get(f'/groups/{group_id}') def list_group_members(self, group_id): '''returns directoryObjects''' return self.get_paged(f'/groups/{group_id}/members')", "self.get_paged(f'/groups/{group_id}/members') def get_directoryobject(self, object_id): return self.get(f'/directoryObjects/{object_id}') def list_group_owners(self, group_id): return self.get_paged(f'/groups/{group_id}/owners') def get_group_drive(self,", "group_id): '''returns directoryObjects''' return self.get_paged(f'/groups/{group_id}/members') def get_directoryobject(self, object_id): return self.get(f'/directoryObjects/{object_id}') def list_group_owners(self, group_id):", "def list_groups(self, **kwargs): '''https://docs.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0''' url = f'/groups' search = kwargs.get('search') 
if search is", "return self.get_paged(f'/groups/{group_id}/members') def get_directoryobject(self, object_id): return self.get(f'/directoryObjects/{object_id}') def list_group_owners(self, group_id): return self.get_paged(f'/groups/{group_id}/owners') def", "def get_directoryobject(self, object_id): return self.get(f'/directoryObjects/{object_id}') def list_group_owners(self, group_id): return self.get_paged(f'/groups/{group_id}/owners') def get_group_drive(self, group_id):", "return self.get(f'/directoryObjects/{object_id}') def list_group_owners(self, group_id): return self.get_paged(f'/groups/{group_id}/owners') def get_group_drive(self, group_id): return self.get(f'/groups/{group_id}/drive') def", "import memoized class MgraphConnectorGroupMixin: def list_groups(self, **kwargs): '''https://docs.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0''' url = f'/groups' search =", "**kwargs) @memoized def get_group(self, group_id): '''returns a group''' return self.get(f'/groups/{group_id}') def list_group_members(self, group_id):", "group_id): return self.get_paged(f'/groups/{group_id}/owners') def get_group_drive(self, group_id): return self.get(f'/groups/{group_id}/drive') def get_group_drives(self, group_id): return self.get(f'/groups/{group_id}/drives')", "search = kwargs.get('search') if search is not None: del kwargs['search'] url += f\"?$filter=startswith(displayName,'{search}')\"", "get_group(self, group_id): '''returns a group''' return self.get(f'/groups/{group_id}') def list_group_members(self, group_id): '''returns directoryObjects''' return", "get_directoryobject(self, object_id): return self.get(f'/directoryObjects/{object_id}') def list_group_owners(self, group_id): return self.get_paged(f'/groups/{group_id}/owners') def get_group_drive(self, group_id): return", "kwargs['search'] url += f\"?$filter=startswith(displayName,'{search}')\" return self.get_paged(url, **kwargs) @memoized def get_group(self, group_id): '''returns a" ]
[ "(Evan intro) sm.setSpeakerID(1013101) sm.sendNext(\"Hey, Evan. You up? What's with the dark circles under", "one! Did you see a dog in your dream, too? Hahaha!\\r\\n\\r\\n#fUI/UIWindow2.img/QuestIcon/8/0# 20 exp\")", "with the dark circles under your eyes? Didn't sleep well? Huh? A strange", "to interpret dreams, but that sounds like a good one! Did you see", "sm.sendSay(\"Muahahahahaha, a dragon? Are you serious? I don't know how to interpret dreams,", "the dark circles under your eyes? Didn't sleep well? Huh? A strange dream?", "Whoa? A dream about a dragon?\") sm.sendSay(\"Muahahahahaha, a dragon? Are you serious? I", "I don't know how to interpret dreams, but that sounds like a good", "Strange dream (Evan intro) sm.setSpeakerID(1013101) sm.sendNext(\"Hey, Evan. You up? What's with the dark", "| Strange dream (Evan intro) sm.setSpeakerID(1013101) sm.sendNext(\"Hey, Evan. You up? What's with the", "intro) sm.setSpeakerID(1013101) sm.sendNext(\"Hey, Evan. You up? What's with the dark circles under your", "A dream about a dragon?\") sm.sendSay(\"Muahahahahaha, a dragon? Are you serious? I don't", "dream about a dragon?\") sm.sendSay(\"Muahahahahaha, a dragon? Are you serious? I don't know", "know how to interpret dreams, but that sounds like a good one! Did", "sm.setSpeakerID(1013101) sm.sendNext(\"Hey, Evan. You up? What's with the dark circles under your eyes?", "dreams, but that sounds like a good one! Did you see a dog", "that sounds like a good one! Did you see a dog in your", "good one! Did you see a dog in your dream, too? Hahaha!\\r\\n\\r\\n#fUI/UIWindow2.img/QuestIcon/8/0# 20", "22000 | Strange dream (Evan intro) sm.setSpeakerID(1013101) sm.sendNext(\"Hey, Evan. You up? What's with", "a good one! Did you see a dog in your dream, too? Hahaha!\\r\\n\\r\\n#fUI/UIWindow2.img/QuestIcon/8/0#", "well? Huh? A strange dream? What was it about? Whoa? A dream about", "eyes? Didn't sleep well? Huh? A strange dream? What was it about? 
Whoa?", "sounds like a good one! Did you see a dog in your dream,", "<reponame>G00dBye/YYMS<filename>scripts/quest/q22000e.py # 22000 | Strange dream (Evan intro) sm.setSpeakerID(1013101) sm.sendNext(\"Hey, Evan. You up?", "your eyes? Didn't sleep well? Huh? A strange dream? What was it about?", "Evan. You up? What's with the dark circles under your eyes? Didn't sleep", "dragon?\") sm.sendSay(\"Muahahahahaha, a dragon? Are you serious? I don't know how to interpret", "dream (Evan intro) sm.setSpeakerID(1013101) sm.sendNext(\"Hey, Evan. You up? What's with the dark circles", "you see a dog in your dream, too? Hahaha!\\r\\n\\r\\n#fUI/UIWindow2.img/QuestIcon/8/0# 20 exp\") sm.giveExp(20) sm.completeQuest(parentID)", "dark circles under your eyes? Didn't sleep well? Huh? A strange dream? What", "Didn't sleep well? Huh? A strange dream? What was it about? Whoa? A", "You up? What's with the dark circles under your eyes? Didn't sleep well?", "sm.sendNext(\"Hey, Evan. You up? What's with the dark circles under your eyes? Didn't", "What was it about? Whoa? A dream about a dragon?\") sm.sendSay(\"Muahahahahaha, a dragon?", "was it about? Whoa? A dream about a dragon?\") sm.sendSay(\"Muahahahahaha, a dragon? Are", "about? Whoa? A dream about a dragon?\") sm.sendSay(\"Muahahahahaha, a dragon? Are you serious?", "but that sounds like a good one! Did you see a dog in", "about a dragon?\") sm.sendSay(\"Muahahahahaha, a dragon? Are you serious? I don't know how", "serious? I don't know how to interpret dreams, but that sounds like a", "interpret dreams, but that sounds like a good one! Did you see a", "how to interpret dreams, but that sounds like a good one! Did you", "Did you see a dog in your dream, too? Hahaha!\\r\\n\\r\\n#fUI/UIWindow2.img/QuestIcon/8/0# 20 exp\") sm.giveExp(20)", "it about? Whoa? A dream about a dragon?\") sm.sendSay(\"Muahahahahaha, a dragon? Are you", "a dragon?\") sm.sendSay(\"Muahahahahaha, a dragon? Are you serious? I don't know how to", "dragon? 
Are you serious? I don't know how to interpret dreams, but that", "don't know how to interpret dreams, but that sounds like a good one!", "strange dream? What was it about? Whoa? A dream about a dragon?\") sm.sendSay(\"Muahahahahaha,", "up? What's with the dark circles under your eyes? Didn't sleep well? Huh?", "What's with the dark circles under your eyes? Didn't sleep well? Huh? A", "Huh? A strange dream? What was it about? Whoa? A dream about a", "Are you serious? I don't know how to interpret dreams, but that sounds", "you serious? I don't know how to interpret dreams, but that sounds like", "see a dog in your dream, too? Hahaha!\\r\\n\\r\\n#fUI/UIWindow2.img/QuestIcon/8/0# 20 exp\") sm.giveExp(20) sm.completeQuest(parentID) sm.sendSayImage(\"UI/tutorial/evan/2/0\")", "dream? What was it about? Whoa? A dream about a dragon?\") sm.sendSay(\"Muahahahahaha, a", "circles under your eyes? Didn't sleep well? Huh? A strange dream? What was", "like a good one! Did you see a dog in your dream, too?", "under your eyes? Didn't sleep well? Huh? A strange dream? What was it", "a dragon? Are you serious? I don't know how to interpret dreams, but", "sleep well? Huh? A strange dream? What was it about? Whoa? A dream", "# 22000 | Strange dream (Evan intro) sm.setSpeakerID(1013101) sm.sendNext(\"Hey, Evan. You up? What's", "A strange dream? What was it about? Whoa? A dream about a dragon?\")" ]
[ "utf8 -*- # put this on /usr/local/bin/ # without .py extension from twython", "shout_transaction def on_error(self, status_code, data): print status_code print \"iniciando streaming\" class tw4: tak=", "client.aaserver.shouts.insert(shout_transaction) except: client=pymongo.MongoClient(\"mongodb://labmacambira:macambira00@ds031948.mongolab.com:31948/aaserver\") client.aaserver.shouts.insert(shout_transaction) print shout_transaction def on_error(self, status_code, data): print status_code print", "status_code print \"iniciando streaming\" class tw4: tak= \"U3gkdcw144pb3H315Vsmphne5\" taks=\"jbuLKuamEaiNPXJfhfC9kaXYcoSSfRIgTldwuQYCcUJzEGNukU\" tat= \"2430470406-45gX6ihMxnKQQmjX2yR1VoaTQIddgY5bT7OSOzT\" tats=\"bHS4NkMwBFaysdVqnsT25xhNzZwEbM64KPdpRDB6RqZ2Z\" stream=MyStreamer(tw4.tak,tw4.taks,tw4.tat,tw4.tats)", "def on_success(self, data): if 'text' in data: now=datetime.datetime.now() nick=data['user'][\"screen_name\"].encode('utf-8') shout=data['text'].encode('utf-8') shout_transaction={\"time\":now,\"nick\":nick,\"shout\":shout} try: client.aaserver.shouts.insert(shout_transaction)", "if 'text' in data: now=datetime.datetime.now() nick=data['user'][\"screen_name\"].encode('utf-8') shout=data['text'].encode('utf-8') shout_transaction={\"time\":now,\"nick\":nick,\"shout\":shout} try: client.aaserver.shouts.insert(shout_transaction) except: client=pymongo.MongoClient(\"mongodb://labmacambira:macambira00@ds031948.mongolab.com:31948/aaserver\") client.aaserver.shouts.insert(shout_transaction)", "data): if 'text' in data: now=datetime.datetime.now() nick=data['user'][\"screen_name\"].encode('utf-8') shout=data['text'].encode('utf-8') shout_transaction={\"time\":now,\"nick\":nick,\"shout\":shout} try: client.aaserver.shouts.insert(shout_transaction) except: client=pymongo.MongoClient(\"mongodb://labmacambira:macambira00@ds031948.mongolab.com:31948/aaserver\")", "#-*- coding: utf8 -*- # put this on /usr/local/bin/ # without .py extension", 
"shout_transaction={\"time\":now,\"nick\":nick,\"shout\":shout} try: client.aaserver.shouts.insert(shout_transaction) except: client=pymongo.MongoClient(\"mongodb://labmacambira:macambira00@ds031948.mongolab.com:31948/aaserver\") client.aaserver.shouts.insert(shout_transaction) print shout_transaction def on_error(self, status_code, data): print", "#! /usr/bin/env python #-*- coding: utf8 -*- # put this on /usr/local/bin/ #", "client.aaserver.shouts.insert(shout_transaction) print shout_transaction def on_error(self, status_code, data): print status_code print \"iniciando streaming\" class", "on /usr/local/bin/ # without .py extension from twython import TwythonStreamer import datetime, pymongo", "print shout_transaction def on_error(self, status_code, data): print status_code print \"iniciando streaming\" class tw4:", "data): print status_code print \"iniciando streaming\" class tw4: tak= \"U3gkdcw144pb3H315Vsmphne5\" taks=\"jbuLKuamEaiNPXJfhfC9kaXYcoSSfRIgTldwuQYCcUJzEGNukU\" tat= \"2430470406-45gX6ihMxnKQQmjX2yR1VoaTQIddgY5bT7OSOzT\"", "this on /usr/local/bin/ # without .py extension from twython import TwythonStreamer import datetime,", "try: client.aaserver.shouts.insert(shout_transaction) except: client=pymongo.MongoClient(\"mongodb://labmacambira:macambira00@ds031948.mongolab.com:31948/aaserver\") client.aaserver.shouts.insert(shout_transaction) print shout_transaction def on_error(self, status_code, data): print status_code", "twython import TwythonStreamer import datetime, pymongo class MyStreamer(TwythonStreamer): def on_success(self, data): if 'text'", "'text' in data: now=datetime.datetime.now() nick=data['user'][\"screen_name\"].encode('utf-8') shout=data['text'].encode('utf-8') shout_transaction={\"time\":now,\"nick\":nick,\"shout\":shout} try: client.aaserver.shouts.insert(shout_transaction) except: client=pymongo.MongoClient(\"mongodb://labmacambira:macambira00@ds031948.mongolab.com:31948/aaserver\") client.aaserver.shouts.insert(shout_transaction) 
print", "now=datetime.datetime.now() nick=data['user'][\"screen_name\"].encode('utf-8') shout=data['text'].encode('utf-8') shout_transaction={\"time\":now,\"nick\":nick,\"shout\":shout} try: client.aaserver.shouts.insert(shout_transaction) except: client=pymongo.MongoClient(\"mongodb://labmacambira:macambira00@ds031948.mongolab.com:31948/aaserver\") client.aaserver.shouts.insert(shout_transaction) print shout_transaction def on_error(self,", "shout=data['text'].encode('utf-8') shout_transaction={\"time\":now,\"nick\":nick,\"shout\":shout} try: client.aaserver.shouts.insert(shout_transaction) except: client=pymongo.MongoClient(\"mongodb://labmacambira:macambira00@ds031948.mongolab.com:31948/aaserver\") client.aaserver.shouts.insert(shout_transaction) print shout_transaction def on_error(self, status_code, data):", "import TwythonStreamer import datetime, pymongo class MyStreamer(TwythonStreamer): def on_success(self, data): if 'text' in", "status_code, data): print status_code print \"iniciando streaming\" class tw4: tak= \"U3gkdcw144pb3H315Vsmphne5\" taks=\"jbuLKuamEaiNPXJfhfC9kaXYcoSSfRIgTldwuQYCcUJzEGNukU\" tat=", "# without .py extension from twython import TwythonStreamer import datetime, pymongo class MyStreamer(TwythonStreamer):", "datetime, pymongo class MyStreamer(TwythonStreamer): def on_success(self, data): if 'text' in data: now=datetime.datetime.now() nick=data['user'][\"screen_name\"].encode('utf-8')", "# put this on /usr/local/bin/ # without .py extension from twython import TwythonStreamer", "extension from twython import TwythonStreamer import datetime, pymongo class MyStreamer(TwythonStreamer): def on_success(self, data):", "class MyStreamer(TwythonStreamer): def on_success(self, data): if 'text' in data: now=datetime.datetime.now() nick=data['user'][\"screen_name\"].encode('utf-8') shout=data['text'].encode('utf-8') shout_transaction={\"time\":now,\"nick\":nick,\"shout\":shout}", "on_error(self, status_code, data): print status_code print 
\"iniciando streaming\" class tw4: tak= \"U3gkdcw144pb3H315Vsmphne5\" taks=\"jbuLKuamEaiNPXJfhfC9kaXYcoSSfRIgTldwuQYCcUJzEGNukU\"", "on_success(self, data): if 'text' in data: now=datetime.datetime.now() nick=data['user'][\"screen_name\"].encode('utf-8') shout=data['text'].encode('utf-8') shout_transaction={\"time\":now,\"nick\":nick,\"shout\":shout} try: client.aaserver.shouts.insert(shout_transaction) except:", "-*- # put this on /usr/local/bin/ # without .py extension from twython import", "put this on /usr/local/bin/ # without .py extension from twython import TwythonStreamer import", "except: client=pymongo.MongoClient(\"mongodb://labmacambira:macambira00@ds031948.mongolab.com:31948/aaserver\") client.aaserver.shouts.insert(shout_transaction) print shout_transaction def on_error(self, status_code, data): print status_code print \"iniciando", "print \"iniciando streaming\" class tw4: tak= \"U3gkdcw144pb3H315Vsmphne5\" taks=\"jbuLKuamEaiNPXJfhfC9kaXYcoSSfRIgTldwuQYCcUJzEGNukU\" tat= \"2430470406-45gX6ihMxnKQQmjX2yR1VoaTQIddgY5bT7OSOzT\" tats=\"bHS4NkMwBFaysdVqnsT25xhNzZwEbM64KPdpRDB6RqZ2Z\" stream=MyStreamer(tw4.tak,tw4.taks,tw4.tat,tw4.tats) stream.statuses.filter(track=\"#aao0\")", "without .py extension from twython import TwythonStreamer import datetime, pymongo class MyStreamer(TwythonStreamer): def", "import datetime, pymongo class MyStreamer(TwythonStreamer): def on_success(self, data): if 'text' in data: now=datetime.datetime.now()", "pymongo class MyStreamer(TwythonStreamer): def on_success(self, data): if 'text' in data: now=datetime.datetime.now() nick=data['user'][\"screen_name\"].encode('utf-8') shout=data['text'].encode('utf-8')", "coding: utf8 -*- # put this on /usr/local/bin/ # without .py extension from", "data: now=datetime.datetime.now() nick=data['user'][\"screen_name\"].encode('utf-8') shout=data['text'].encode('utf-8') shout_transaction={\"time\":now,\"nick\":nick,\"shout\":shout} try: client.aaserver.shouts.insert(shout_transaction) 
except: client=pymongo.MongoClient(\"mongodb://labmacambira:macambira00@ds031948.mongolab.com:31948/aaserver\") client.aaserver.shouts.insert(shout_transaction) print shout_transaction def", ".py extension from twython import TwythonStreamer import datetime, pymongo class MyStreamer(TwythonStreamer): def on_success(self,", "nick=data['user'][\"screen_name\"].encode('utf-8') shout=data['text'].encode('utf-8') shout_transaction={\"time\":now,\"nick\":nick,\"shout\":shout} try: client.aaserver.shouts.insert(shout_transaction) except: client=pymongo.MongoClient(\"mongodb://labmacambira:macambira00@ds031948.mongolab.com:31948/aaserver\") client.aaserver.shouts.insert(shout_transaction) print shout_transaction def on_error(self, status_code,", "TwythonStreamer import datetime, pymongo class MyStreamer(TwythonStreamer): def on_success(self, data): if 'text' in data:", "print status_code print \"iniciando streaming\" class tw4: tak= \"U3gkdcw144pb3H315Vsmphne5\" taks=\"jbuLKuamEaiNPXJfhfC9kaXYcoSSfRIgTldwuQYCcUJzEGNukU\" tat= \"2430470406-45gX6ihMxnKQQmjX2yR1VoaTQIddgY5bT7OSOzT\" tats=\"bHS4NkMwBFaysdVqnsT25xhNzZwEbM64KPdpRDB6RqZ2Z\"", "python #-*- coding: utf8 -*- # put this on /usr/local/bin/ # without .py", "from twython import TwythonStreamer import datetime, pymongo class MyStreamer(TwythonStreamer): def on_success(self, data): if", "/usr/bin/env python #-*- coding: utf8 -*- # put this on /usr/local/bin/ # without", "in data: now=datetime.datetime.now() nick=data['user'][\"screen_name\"].encode('utf-8') shout=data['text'].encode('utf-8') shout_transaction={\"time\":now,\"nick\":nick,\"shout\":shout} try: client.aaserver.shouts.insert(shout_transaction) except: client=pymongo.MongoClient(\"mongodb://labmacambira:macambira00@ds031948.mongolab.com:31948/aaserver\") client.aaserver.shouts.insert(shout_transaction) print shout_transaction", "client=pymongo.MongoClient(\"mongodb://labmacambira:macambira00@ds031948.mongolab.com:31948/aaserver\") 
client.aaserver.shouts.insert(shout_transaction) print shout_transaction def on_error(self, status_code, data): print status_code print \"iniciando streaming\"", "def on_error(self, status_code, data): print status_code print \"iniciando streaming\" class tw4: tak= \"U3gkdcw144pb3H315Vsmphne5\"", "/usr/local/bin/ # without .py extension from twython import TwythonStreamer import datetime, pymongo class", "MyStreamer(TwythonStreamer): def on_success(self, data): if 'text' in data: now=datetime.datetime.now() nick=data['user'][\"screen_name\"].encode('utf-8') shout=data['text'].encode('utf-8') shout_transaction={\"time\":now,\"nick\":nick,\"shout\":shout} try:" ]
[ "# coding=utf-8 # module provided just for backward compatibility from .misc import *" ]
[ "0 MINOR = 1 PATCH = 0 PRE_RELEASE = '' # Use the", "the following formatting: (major, minor, patch, pre-release) VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)", "'tilosutils' __contact_names__ = 'dertilo' __contact_emails__ = '<EMAIL>' __homepage__ = 'https://github.com/dertilo' __repository_url__ = 'https://github.com/dertilo/tilosutils'", "''.join(VERSION[3:]) __package_name__ = 'tilosutils' __contact_names__ = 'dertilo' __contact_emails__ = '<EMAIL>' __homepage__ = 'https://github.com/dertilo'", "'<EMAIL>' __homepage__ = 'https://github.com/dertilo' __repository_url__ = 'https://github.com/dertilo/tilosutils' __download_url__ = 'https://github.com/dertilo/tilosutils' __description__ = 'python", "Use the following formatting: (major, minor, patch, pre-release) VERSION = (MAJOR, MINOR, PATCH,", "patch, pre-release) VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE) __shortversion__ = '.'.join(map(str, VERSION[:3])) __version__", "= '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:]) __package_name__ = 'tilosutils' __contact_names__ = 'dertilo' __contact_emails__ =", "<gh_stars>0 # heavily inspired by https://github.com/NVIDIA/NeMo MAJOR = 0 MINOR = 1 PATCH", "https://github.com/NVIDIA/NeMo MAJOR = 0 MINOR = 1 PATCH = 0 PRE_RELEASE = ''", "formatting: (major, minor, patch, pre-release) VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE) __shortversion__ =", "heavily inspired by https://github.com/NVIDIA/NeMo MAJOR = 0 MINOR = 1 PATCH = 0", "= 0 PRE_RELEASE = '' # Use the following formatting: (major, minor, patch,", "1 PATCH = 0 PRE_RELEASE = '' # Use the following formatting: (major,", "= 'python code' __license__ = 'MIT License' __keywords__ = 'machine learning, NLP, pytorch,", "= 'https://github.com/dertilo' __repository_url__ = 'https://github.com/dertilo/tilosutils' __download_url__ = 'https://github.com/dertilo/tilosutils' __description__ = 'python code' __license__", "__shortversion__ = '.'.join(map(str, VERSION[:3])) __version__ = '.'.join(map(str, VERSION[:3])) + 
''.join(VERSION[3:]) __package_name__ = 'tilosutils'", "+ ''.join(VERSION[3:]) __package_name__ = 'tilosutils' __contact_names__ = 'dertilo' __contact_emails__ = '<EMAIL>' __homepage__ =", "following formatting: (major, minor, patch, pre-release) VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE) __shortversion__", "by https://github.com/NVIDIA/NeMo MAJOR = 0 MINOR = 1 PATCH = 0 PRE_RELEASE =", "MINOR = 1 PATCH = 0 PRE_RELEASE = '' # Use the following", "= (MAJOR, MINOR, PATCH, PRE_RELEASE) __shortversion__ = '.'.join(map(str, VERSION[:3])) __version__ = '.'.join(map(str, VERSION[:3]))", "PATCH, PRE_RELEASE) __shortversion__ = '.'.join(map(str, VERSION[:3])) __version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:]) __package_name__", "__download_url__ = 'https://github.com/dertilo/tilosutils' __description__ = 'python code' __license__ = 'MIT License' __keywords__ =", "= 0 MINOR = 1 PATCH = 0 PRE_RELEASE = '' # Use", "__homepage__ = 'https://github.com/dertilo' __repository_url__ = 'https://github.com/dertilo/tilosutils' __download_url__ = 'https://github.com/dertilo/tilosutils' __description__ = 'python code'", "MINOR, PATCH, PRE_RELEASE) __shortversion__ = '.'.join(map(str, VERSION[:3])) __version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])", "'https://github.com/dertilo/tilosutils' __description__ = 'python code' __license__ = 'MIT License' __keywords__ = 'machine learning,", "= '' # Use the following formatting: (major, minor, patch, pre-release) VERSION =", "__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:]) __package_name__ = 'tilosutils' __contact_names__ = 'dertilo' __contact_emails__", "VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE) __shortversion__ = '.'.join(map(str, VERSION[:3])) __version__ = '.'.join(map(str,", "PATCH = 0 PRE_RELEASE = '' # Use the following formatting: (major, minor,", "pre-release) VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE) __shortversion__ = '.'.join(map(str, VERSION[:3])) __version__ =", 
"'dertilo' __contact_emails__ = '<EMAIL>' __homepage__ = 'https://github.com/dertilo' __repository_url__ = 'https://github.com/dertilo/tilosutils' __download_url__ = 'https://github.com/dertilo/tilosutils'", "'.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:]) __package_name__ = 'tilosutils' __contact_names__ = 'dertilo' __contact_emails__ = '<EMAIL>'", "'https://github.com/dertilo' __repository_url__ = 'https://github.com/dertilo/tilosutils' __download_url__ = 'https://github.com/dertilo/tilosutils' __description__ = 'python code' __license__ =", "(MAJOR, MINOR, PATCH, PRE_RELEASE) __shortversion__ = '.'.join(map(str, VERSION[:3])) __version__ = '.'.join(map(str, VERSION[:3])) +", "(major, minor, patch, pre-release) VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE) __shortversion__ = '.'.join(map(str,", "0 PRE_RELEASE = '' # Use the following formatting: (major, minor, patch, pre-release)", "inspired by https://github.com/NVIDIA/NeMo MAJOR = 0 MINOR = 1 PATCH = 0 PRE_RELEASE", "# heavily inspired by https://github.com/NVIDIA/NeMo MAJOR = 0 MINOR = 1 PATCH =", "= 'https://github.com/dertilo/tilosutils' __description__ = 'python code' __license__ = 'MIT License' __keywords__ = 'machine", "__contact_names__ = 'dertilo' __contact_emails__ = '<EMAIL>' __homepage__ = 'https://github.com/dertilo' __repository_url__ = 'https://github.com/dertilo/tilosutils' __download_url__", "'.'.join(map(str, VERSION[:3])) __version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:]) __package_name__ = 'tilosutils' __contact_names__ =", "__package_name__ = 'tilosutils' __contact_names__ = 'dertilo' __contact_emails__ = '<EMAIL>' __homepage__ = 'https://github.com/dertilo' __repository_url__", "= 'https://github.com/dertilo/tilosutils' __download_url__ = 'https://github.com/dertilo/tilosutils' __description__ = 'python code' __license__ = 'MIT License'", "= 'tilosutils' __contact_names__ = 'dertilo' __contact_emails__ = '<EMAIL>' __homepage__ = 'https://github.com/dertilo' 
__repository_url__ =", "= '.'.join(map(str, VERSION[:3])) __version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:]) __package_name__ = 'tilosutils' __contact_names__", "# Use the following formatting: (major, minor, patch, pre-release) VERSION = (MAJOR, MINOR,", "PRE_RELEASE) __shortversion__ = '.'.join(map(str, VERSION[:3])) __version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:]) __package_name__ =", "= 'dertilo' __contact_emails__ = '<EMAIL>' __homepage__ = 'https://github.com/dertilo' __repository_url__ = 'https://github.com/dertilo/tilosutils' __download_url__ =", "= '<EMAIL>' __homepage__ = 'https://github.com/dertilo' __repository_url__ = 'https://github.com/dertilo/tilosutils' __download_url__ = 'https://github.com/dertilo/tilosutils' __description__ =", "__repository_url__ = 'https://github.com/dertilo/tilosutils' __download_url__ = 'https://github.com/dertilo/tilosutils' __description__ = 'python code' __license__ = 'MIT", "VERSION[:3])) __version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:]) __package_name__ = 'tilosutils' __contact_names__ = 'dertilo'", "__description__ = 'python code' __license__ = 'MIT License' __keywords__ = 'machine learning, NLP,", "'python code' __license__ = 'MIT License' __keywords__ = 'machine learning, NLP, pytorch, tts,", "minor, patch, pre-release) VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE) __shortversion__ = '.'.join(map(str, VERSION[:3]))", "VERSION[:3])) + ''.join(VERSION[3:]) __package_name__ = 'tilosutils' __contact_names__ = 'dertilo' __contact_emails__ = '<EMAIL>' __homepage__", "MAJOR = 0 MINOR = 1 PATCH = 0 PRE_RELEASE = '' #", "__contact_emails__ = '<EMAIL>' __homepage__ = 'https://github.com/dertilo' __repository_url__ = 'https://github.com/dertilo/tilosutils' __download_url__ = 'https://github.com/dertilo/tilosutils' __description__", "= 1 PATCH = 0 PRE_RELEASE = '' # Use the following formatting:", "'' # Use the following formatting: (major, minor, patch, pre-release) 
VERSION = (MAJOR,", "__license__ = 'MIT License' __keywords__ = 'machine learning, NLP, pytorch, tts, speech, language'", "'https://github.com/dertilo/tilosutils' __download_url__ = 'https://github.com/dertilo/tilosutils' __description__ = 'python code' __license__ = 'MIT License' __keywords__", "PRE_RELEASE = '' # Use the following formatting: (major, minor, patch, pre-release) VERSION", "code' __license__ = 'MIT License' __keywords__ = 'machine learning, NLP, pytorch, tts, speech," ]
# Packaging entry point for the wechat-pay-sdk distribution.
# (Removed the stray `<gh_stars>` dataset residue that made this file a
# syntax error.)
from setuptools import setup

setup(
    name='wechat-pay-sdk',
    packages=['wechatpay'],
    version='0.6.2',
    description='A sdk for wechat pay',
    author='<NAME>',
    license='MIT',
    # Ship non-Python files declared in MANIFEST.in with the package.
    include_package_data=True,
    author_email='<EMAIL>',
    url='https://github.com/Narcissist1/wechat-pay',
    download_url='https://github.com/Narcissist1/wechat-pay/archive/0.1.tar.gz',
    keywords=['wechat', 'pay'],
    classifiers=[],
    # Runtime dependencies: XML <-> dict conversion plus HTTP client.
    install_requires=[
        'xmltodict',
        'requests',
        'dicttoxml',
    ],
)
[ "URL_MAIN + 'category/films/historique/'] ) liste.append( ['Horreur', URL_MAIN + 'category/films/horreur/'] ) liste.append( ['Musical', URL_MAIN", "elif 'quelle-est-votre-serie-preferee' in aEntry[1]: pass elif 'series' in sUrl1 or re.match('.+?saison [0-9]+', sTitle,", "re.search('>(S[0-9]{2}E[0-9]{2})<', sUrl) HOST = re.search('a href=\"https*:\\/\\/([^.]+)', sUrl) if SXXEX: # on vire le", "pour récuperer la qualité sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\"><strong>', '<strong><span style=\"color: #ff9900;\">') oParser", "= oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Réécriture de sHtmlContent pour", "affichage des saisons sTitle = re.sub(' - Saison \\d+', '', sMovieTitle) + '", "oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler) oOutputParameterHandler", "'') sHtmlContent = sHtmlContent.replace('<b> </b>', ' ') sHtmlContent = sHtmlContent.replace('<b></b>', ' ') sHtmlContent", "oHoster, sHosterUrl, sThumb) else: oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle)", "'showMovies', sTitle, 'genres.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showList(): oGui = cGui() liste = []", "'?s=', 'showMovies') FUNCTION_SEARCH = 'showMovies' def load(): oGui = cGui() oOutputParameterHandler = cOutputParameterHandler()", "cas de recherche sHtmlContent = sHtmlContent.replace('quelle-est-votre-serie-preferee', '<>') sHtmlContent = sHtmlContent.replace('top-series-du-moment', '<>') sHtmlContent =", "') sTitle = sTitle.replace(' [Streaming]', '') sTitle = sTitle.replace(' [Telecharger]', '').replace(' [Telechargement]', '')", ") liste.append( ['VOSTFR', URL_MAIN + 
'category/films/vostfr-films/'] ) liste.append( ['BLURAY 1080p/720p', URL_MAIN + 'category/films/bluray-1080p-720p/']", "(aResult[0] == True): return aResult[1][0] return False def showSeries(sLoop = False): oGui =", "#33cccc;[^<>\"]*\">(?:<(?:strong|b)>)((?:Stream|Telec)[^<>]+)|\"center\">(.pisode[^<]{2,12})*<(?!\\/a>)([^<>]*a href=\"http.+?)(?:<.p>|<br|<.div)' aResult = oParser.parse(sHtmlContent, sPattern) # astuce en cas d'episode unique #", "'showMovies') REPLAYTV_REPLAYTV = (URL_MAIN + 'category/emissions-tv/', 'showMovies') URL_SEARCH = (URL_MAIN + '?s=', 'showMovies')", "URL_MAIN + 'category/films/drame/'] ) liste.append( ['Espionnage', URL_MAIN + 'category/films/espionnage/'] ) liste.append( ['Famille', URL_MAIN", "URL_MAIN + 'category/series-tv/0-9/'] ) liste.append( ['A-B-C', URL_MAIN + 'category/series-tv/a-b-c/'] ) liste.append( ['D-E-F', URL_MAIN", "#ff9900;\">New</span><b> </b>', '') sHtmlContent = sHtmlContent.replace('<b> </b>', ' ') sHtmlContent = sHtmlContent.replace('<b></b>', '", "+ 'category/films/', 'showMovies') MOVIE_MOVIE = (URL_MAIN + 'category/films/', 'showMovies') MOVIE_VOSTFR = (URL_MAIN +", "SERIE_VFS = (URL_MAIN + 'category/series-tv/series-streaming-vf/', 'showMovies') SERIE_VOSTFR = (URL_MAIN + 'category/series-tv/series-streaming-vostfr/', 'showMovies') REPLAYTV_NEWS", "= cGui() sSearchText = oGui.showKeyBoard() if (sSearchText != False): sUrl = URL_SEARCH[0] +", "astuce en cas d'episode unique # if (aResult[0] == False) and (sLoop ==", "& Mangas en streaming. Tout les meilleurs streaming en illimité.' 
def __checkForNextPage(sHtmlContent):
    # Scan the listing page for a paginator "next" link; return its URL,
    # or False when this is the last page.
    matches = cParser().parse(sHtmlContent, '<a class="next page-numbers" href="([^"]+)"')
    if matches[0] == True:
        return matches[1][0]
    return False
liste.append( ['S-T-U', URL_MAIN + 'category/series-tv/s-t-u/'] ) liste.append( ['V-W-X-Y-Z',", "sSearch and total > 2: if cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0], ''), aEntry[2]) == 0: continue sUrl1", "'category/films/vostfr-films/', 'showMovies') MOVIE_GENRES = (True, 'showGenres') SERIE_SERIES = (URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_NEWS", "and (sLoop == False): # #oGui.setEndOfDirectory() # serieHosters(True) # return if (aResult[0] ==", "= (True, 'showGenres') SERIE_SERIES = (URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_NEWS = (URL_MAIN +", "= cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', 'http://venom/') oGui.addDir(SITE_IDENTIFIER, 'showMoviesSearch', 'Recherche', 'search.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl',", "if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) else: oHoster =", "'[COLOR teal]Suivant >>>[/COLOR]', oOutputParameterHandler) oGui.setEndOfDirectory() def __checkForNextPage(sHtmlContent): sPattern = '<a class=\"next page-numbers\" href=\"([^\"]+)\"'", "cGui from resources.lib.handler.inputParameterHandler import cInputParameterHandler from resources.lib.handler.outputParameterHandler import cOutputParameterHandler from resources.lib.handler.requestHandler import cRequestHandler", "'' else: sDesc = aEntry[3].replace('[&hellip;]', '').replace('&hellip;', '...').replace('&rsquo;', '\\'').replace('&#8217;', '\\'').replace('&#8230;', '...') oOutputParameterHandler = cOutputParameterHandler()", "if (sNextPage != False): oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sNextPage) oGui.addNext(SITE_IDENTIFIER, 'showMovies', '[COLOR teal]Suivant", "# pour récuperer le lien jwplayer(GoogleDrive) if 'filmhdstream' in sHosterUrl: oRequestHandler = 
cRequestHandler(sHosterUrl)", "class=\"next page-numbers\" href=\"([^\"]+)\"' oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] ==", "sMovieTitle) + ' ' + SXXEX.group(1) if HOST: HOST = HOST.group(1).split('/')[0] sDisplayTitle =", "VSlog from resources.lib.multihost import cJheberg import re, unicodedata # clone de dpstreaming.tv SITE_IDENTIFIER", "liste = [] liste.append( ['Action', URL_MAIN + 'category/films/action/'] ) liste.append( ['Animation', URL_MAIN +", "sUrl = URL_SEARCH[0] + sSearchText showMovies(sUrl) oGui.setEndOfDirectory() return def showGenres(): oGui = cGui()", "sThumb) oGui.setEndOfDirectory() def serieHosters(): oGui = cGui() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl')", "'', sThumb, sDesc, oOutputParameterHandler) else: oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_)", "if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) oGui.setEndOfDirectory() def serieHosters():", "> 2: if cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0], ''), aEntry[2]) == 0: continue sUrl1 = aEntry[0] sTitle", "oGui = cGui() liste = [] liste.append( ['Action', URL_MAIN + 'category/films/action/'] ) liste.append(", "= cParser() sPattern = '<strong><span style=\"color: #ff9900;\">([^<]+)<|<a class=\"large button.+?\" href=\"([^<>\"]+?)\" target=\"(?:_blank|vid)\"' aResult =", "'[/COLOR]' else: sTitle = sMovieTitle + ' ' + aEntry[1].replace(' New', '') sDisplayTitle", "oParser.parse(sHtmlContent, sPattern) if (aResult[0] == False): oGui.addText(SITE_IDENTIFIER) if (aResult[0] == True): total =", "cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Lettres [COLOR coral]' + sTitle + '[/COLOR]', 'az.png',", "style=\"text-align: 
center;\">' aResult = oParser.parse(sHtmlContent, sPattern) if aResult[0]: sDesc = aResult[1][0] sDesc =", "oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler) oOutputParameterHandler =", "+ 'category/series-tv/series-streaming-vostfr/', 'showMovies') REPLAYTV_NEWS = (URL_MAIN + 'category/emissions-tv/', 'showMovies') REPLAYTV_REPLAYTV = (URL_MAIN +", "else: oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\".+?<p>([^<]+)</p>' oRequestHandler", "une serie if (len(aResult) == 0) and (sLoop == False): # oGui.setEndOfDirectory() showSeries(True)", "= cGui() liste = [] liste.append( ['0-9', URL_MAIN + 'category/series-tv/0-9/'] ) liste.append( ['A-B-C',", "progress_ = progress().VScreate(SITE_NAME) for aEntry in aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break if", "oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0]) oGui.addDir(SITE_IDENTIFIER,", "'category/series-tv/', 'showMovies') SERIE_LIST = (True, 'showList') SERIE_VFS = (URL_MAIN + 'category/series-tv/series-streaming-vf/', 'showMovies') SERIE_VOSTFR", "from resources.lib.gui.hoster import cHosterGui from resources.lib.gui.gui import cGui from resources.lib.handler.inputParameterHandler import cInputParameterHandler from", "oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) oGui.addMisc(SITE_IDENTIFIER, 
'serieHosters', sDisplayTitle, '',", "re, unicodedata # clone de dpstreaming.tv SITE_IDENTIFIER = 'streamingk_com' SITE_NAME = 'StreamingK' SITE_DESC", "'Séries (VOSTFR)', 'vostfr.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', REPLAYTV_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, REPLAYTV_NEWS[1], 'Emissions TV',", "= oRequestHandler.request() sHtmlContent = sHtmlContent.decode('utf-8', \"replace\") sHtmlContent = unicodedata.normalize('NFD', sHtmlContent).encode('ascii', 'ignore').decode('unicode_escape') # vire", "oGui = cGui() sSearchText = oGui.showKeyBoard() if (sSearchText != False): sUrl = URL_SEARCH[0]", "sDesc = '' else: sDesc = aEntry[3].replace('[&hellip;]', '').replace('&hellip;', '...').replace('&rsquo;', '\\'').replace('&#8217;', '\\'').replace('&#8230;', '...') oOutputParameterHandler", "(URL_MAIN + 'category/films/', 'showMovies') MOVIE_VOSTFR = (URL_MAIN + 'category/films/vostfr-films/', 'showMovies') MOVIE_GENRES = (True,", "aEntry[0] + '[/COLOR]') else: sHosterUrl = aEntry[1] # pour récuperer tous les liens", "aResult[1][0] return False def showSeries(sLoop = False): oGui = cGui() oParser = cParser()", ") liste.append( ['Romance', URL_MAIN + 'category/films/romance/'] ) liste.append( ['Science-Fiction', URL_MAIN + 'category/films/science-fiction/'] )", "if not sSearch: sNextPage = __checkForNextPage(sHtmlContent) if (sNextPage != False): oOutputParameterHandler = cOutputParameterHandler()", "= sMovieTitle + ' ' + aEntry[1].replace(' New', '') sDisplayTitle = sTitle oOutputParameterHandler", "sHtmlContent.replace('<span style=\"color: #ff9900;\">New</span><b> </b>', '') sHtmlContent = sHtmlContent.replace('<b> </b>', ' ') sHtmlContent =", "cJheberg import re, unicodedata # clone de dpstreaming.tv SITE_IDENTIFIER = 'streamingk_com' SITE_NAME =", "style=\"text-align: center;\">([^<]+)</p><p style=\"text-align: center;\">' aResult = oParser.parse(sHtmlContent, sPattern) if 
def showMoviesSearch():
    # Ask the user for a search term via the on-screen keyboard, then
    # delegate to showMovies() with the site's search URL.
    gui = cGui()
    query = gui.showKeyBoard()
    if query != False:
        showMovies(URL_SEARCH[0] + query)
        gui.setEndOfDirectory()
        return
liste.append( ['G-H-I', URL_MAIN + 'category/series-tv/g-h-i/'] ) liste.append( ['J-K-L', URL_MAIN", "aEntry[2].replace('Saiosn', 'Saison') if 'Brouillon' in sTitle: sTitle = sUrl1.rsplit('/', 2)[1] sTitle = sTitle.replace('-streaming-telecharger',", "= aEntry[0] sTitle = aEntry[2].replace('Saiosn', 'Saison') if 'Brouillon' in sTitle: sTitle = sUrl1.rsplit('/',", "= sUrl1.rsplit('/', 2)[1] sTitle = sTitle.replace('-streaming-telecharger', '').replace('-', ' ') sTitle = sTitle.replace(' [Streaming]',", "== False): # #oGui.setEndOfDirectory() # serieHosters(True) # return if (aResult[0] == True): total", "__checkForNextPage(sHtmlContent): sPattern = '<a class=\"next page-numbers\" href=\"([^\"]+)\"' oParser = cParser() aResult = oParser.parse(sHtmlContent,", "['Drame', URL_MAIN + 'category/films/drame/'] ) liste.append( ['Espionnage', URL_MAIN + 'category/films/espionnage/'] ) liste.append( ['Famille',", "oGui.addDir(SITE_IDENTIFIER, REPLAYTV_NEWS[1], 'Emissions TV', 'replay.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMoviesSearch(): oGui = cGui() sSearchText", "vire accent et '\\' sHtmlContent = sHtmlContent.encode('utf-8') # On remet en utf-8 #", "sUrl1) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) if '-filmographie-streaming' in aEntry[1]: pass elif 'quelle-est-votre-serie-preferee' in", "+ 'category/films/bluray-1080p-720p/'] ) liste.append( ['BLURAY 3D', URL_MAIN + 'category/films/bluray-3d/'] ) liste.append( ['Emissions TV',", "oGui = cGui() oParser = cParser() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle", "# on vire le double affichage des saisons sTitle = re.sub(' - Saison", "+ 'category/series-tv/', 'showMovies') SERIE_NEWS = (URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_LIST = (True, 'showList')", "if 'filmhdstream' in sHosterUrl: oRequestHandler = cRequestHandler(sHosterUrl) sHtmlContent = 
oRequestHandler.request() sPattern = '<iframe.+?src=\"([^\"]+)\"'", "= (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_MOVIES = (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_SERIES =", "from resources.lib.comaddon import progress #, VSlog from resources.lib.multihost import cJheberg import re, unicodedata", "oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) elif 'mangas' in sUrl: oGui.addTV(SITE_IDENTIFIER, 'showSeries',", "URL_MAIN + 'category/series-tv/s-t-u/'] ) liste.append( ['V-W-X-Y-Z', URL_MAIN + 'category/series-tv/v-w-x-y-z/'] ) for sTitle, sUrl", "'<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\"' else: oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sPattern = '<div", "'<iframe.+?src=\"([^\"]+)\"' aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): for aEntry in aResult[1]:", "= cGui() oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', 'http://venom/') oGui.addDir(SITE_IDENTIFIER, 'showMoviesSearch', 'Recherche', 'search.png', oOutputParameterHandler) oOutputParameterHandler", "# pour récuperer tous les liens if '&url=' in sHosterUrl: sHosterUrl = sHosterUrl.split('&url=')[1]", "= cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', REPLAYTV_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, REPLAYTV_NEWS[1], 'Emissions TV', 'replay.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMoviesSearch():", "+ ' [COLOR coral]' + HOST.capitalize() + '[/COLOR]' else: sTitle = sMovieTitle +", "cParser() sPattern = '<strong><span style=\"color: #ff9900;\">([^<]+)<|<a class=\"large button.+?\" href=\"([^<>\"]+?)\" target=\"(?:_blank|vid)\"' aResult = oParser.parse(sHtmlContent,", "+ 'category/films/comedie-dramatique/'] ) liste.append( ['Documentaire', URL_MAIN + 'category/documentaire/'] ) liste.append( ['Drame', 
URL_MAIN +", "= oInputParameterHandler.getValue('sThumb') sPattern = 'href=\"([^\"]+)\"' oParser = cParser() aResult = oParser.parse(sUrl, sPattern) if", "total) if progress_.iscanceled(): break if aEntry[0]: # stream ou telechargement oGui.addText(SITE_IDENTIFIER, '[COLOR red]'", "sNextPage = __checkForNextPage(sHtmlContent) if (sNextPage != False): oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sNextPage) oGui.addNext(SITE_IDENTIFIER,", "URL_MAIN + 'category/films/fantastique/'] ) liste.append( ['Guerre', URL_MAIN + 'category/films/guerre/'] ) liste.append( ['Historique', URL_MAIN", "if SXXEX: # on vire le double affichage des saisons sTitle = re.sub('", "'...').replace('&rsquo;', '\\'').replace('&#8217;', '\\'').replace('&#8230;', '...') oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl1) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb)", "'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films", "= progress().VScreate(SITE_NAME) for aEntry in aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break if aEntry[0]:", "in liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Lettres [COLOR coral]' +", "oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') oRequestHandler", "+ 'category/films/fantastique/'] ) liste.append( ['Guerre', URL_MAIN + 'category/films/guerre/'] ) liste.append( ['Historique', URL_MAIN +", "MOVIE_GENRES[1], 'Films 
(Genres)', 'genres.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries", "for aEntry in aResult[1]: sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster !=", "sTitle.replace('-streaming-telecharger', '').replace('-', ' ') sTitle = sTitle.replace(' [Streaming]', '') sTitle = sTitle.replace(' [Telecharger]',", "cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Magouille pour virer les 3 ligne en trop", "= sHtmlContent.replace('https://cut-urls.com/st?api=d6e46f2fcd4bfed906a9f3ecbbb6830e862b3afb&amp;url=', '') # récupération du Synopsis sDesc = '' try: sPattern =", "sDesc = sDesc.replace('&#8217;', '\\'').replace('&#8230;', '...') except: pass sPattern = '<span style=\"color: #33cccc;[^<>\"]*\">(?:<(?:strong|b)>)((?:Stream|Telec)[^<>]+)|\"center\">(.pisode[^<]{2,12})*<(?!\\/a>)([^<>]*a href=\"http.+?)(?:<.p>|<br|<.div)'", ") liste.append( ['Arts Martiaux', URL_MAIN + 'category/films/arts-martiaux/'] ) liste.append( ['Aventure', URL_MAIN + 'category/films/aventure-films/']", "sDesc, oOutputParameterHandler) progress_.VSclose(progress_) if not sSearch: sNextPage = __checkForNextPage(sHtmlContent) if (sNextPage != False):", "oGui.setEndOfDirectory() def serieHosters(): oGui = cGui() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle", "oGui.setEndOfDirectory() def showList(): oGui = cGui() liste = [] liste.append( ['0-9', URL_MAIN +", "!= False): oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sNextPage) oGui.addNext(SITE_IDENTIFIER, 'showMovies', '[COLOR teal]Suivant >>>[/COLOR]', oOutputParameterHandler)", "= cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Lettres [COLOR coral]' + sTitle + 
'[/COLOR]',", ") liste.append( ['Western', URL_MAIN + 'category/films/western/'] ) liste.append( ['VOSTFR', URL_MAIN + 'category/films/vostfr-films/'] )", "oGui.showKeyBoard() if (sSearchText != False): sUrl = URL_SEARCH[0] + sSearchText showMovies(sUrl) oGui.setEndOfDirectory() return", "+ '[/COLOR]', 'az.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMovies(sSearch = ''): oGui = cGui() if", "MOVIE_GENRES[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER,", "progress().VScreate(SITE_NAME) for aEntry in aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break # Si recherche", "# stream ou telechargement oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + '[/COLOR]') else: #", "style=\"color: #ff9900;\"><strong>', '<strong><span style=\"color: #ff9900;\">') oParser = cParser() sPattern = '<strong><span style=\"color: #ff9900;\">([^<]+)<|<a", "sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) oGui.addMisc(SITE_IDENTIFIER, 'serieHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) oGui.setEndOfDirectory() def", "sHtmlContent = oRequestHandler.request() sPattern = '<iframe.+?src=\"([^\"]+)\"' aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] ==", "['G-H-I', URL_MAIN + 'category/series-tv/g-h-i/'] ) liste.append( ['J-K-L', URL_MAIN + 'category/series-tv/j-k-l/'] ) liste.append( ['M-N-O',", "= sHtmlContent.encode('utf-8') # On remet en utf-8 # Réécriture de sHtmlContent pour prendre", "break # Si recherche et trop de resultat, on nettoye if sSearch and", "liste.append( ['BLURAY 3D', URL_MAIN + 'category/films/bluray-3d/'] ) liste.append( ['Emissions TV', URL_MAIN + 'category/emissions-tv/']", "= sTitle oOutputParameterHandler = cOutputParameterHandler() 
oOutputParameterHandler.addParameter('siteUrl', sUrl) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) oGui.addMisc(SITE_IDENTIFIER, 'serieHosters',", "oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler", "= ''): oGui = cGui() if sSearch: sUrl = sSearch.replace(' ', '+') sPattern", "= sHtmlContent.replace('top-series-du-moment', '<>') sHtmlContent = sHtmlContent.replace('listes-des-series-annulees-et-renouvelees', '<>') oParser = cParser() aResult = oParser.parse(sHtmlContent,", "['S-T-U', URL_MAIN + 'category/series-tv/s-t-u/'] ) liste.append( ['V-W-X-Y-Z', URL_MAIN + 'category/series-tv/v-w-x-y-z/'] ) for sTitle,", "== False): oGui.addText(SITE_IDENTIFIER) if (aResult[0] == True): total = len(aResult[1]) progress_ = progress().VScreate(SITE_NAME)", "oGui.setEndOfDirectory() def showMoviesSearch(): oGui = cGui() sSearchText = oGui.showKeyBoard() if (sSearchText != False):", "SERIE_NEWS = (URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_LIST = (True, 'showList') SERIE_VFS = (URL_MAIN", "qualité sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\"><strong>', '<strong><span style=\"color: #ff9900;\">') oParser = cParser() sPattern", "URL_MAIN + 'category/series-tv/a-b-c/'] ) liste.append( ['D-E-F', URL_MAIN + 'category/series-tv/d-e-f/'] ) liste.append( ['G-H-I', URL_MAIN", "'category/films/comedie-dramatique/'] ) liste.append( ['Documentaire', URL_MAIN + 'category/documentaire/'] ) liste.append( ['Drame', URL_MAIN + 'category/films/drame/']", "'StreamingK' SITE_DESC = 'Films, Séries & Mangas en streaming. 
Tout les meilleurs streaming", "resources.lib.util import cUtil from resources.lib.comaddon import progress #, VSlog from resources.lib.multihost import cJheberg", "'jheberg' in sHosterUrl: aResult = cJheberg().GetUrls(sHosterUrl) if aResult: for aEntry in aResult: sHosterUrl", "oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl1) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) if '-filmographie-streaming' in aEntry[1]:", "' [COLOR coral]' + HOST.capitalize() + '[/COLOR]' else: sTitle = sMovieTitle + '", "= oGui.showKeyBoard() if (sSearchText != False): sUrl = URL_SEARCH[0] + sSearchText showMovies(sUrl) oGui.setEndOfDirectory()", "= cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() sHtmlContent = sHtmlContent.decode('utf-8', \"replace\") sHtmlContent = unicodedata.normalize('NFD', sHtmlContent).encode('ascii',", "sTitle = sTitle.replace(' [Telecharger]', '').replace(' [Telechargement]', '') sDisplayTitle = sTitle # on retire", "if sSearch and total > 2: if cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0], ''), aEntry[2]) == 0: continue", "'href=\"([^\"]+)\"' oParser = cParser() aResult = oParser.parse(sUrl, sPattern) if (aResult[0] == True): for", "Magouille pour virer les 3 ligne en trop en cas de recherche sHtmlContent", "else: sTitle = sMovieTitle + ' ' + aEntry[1].replace(' New', '') sDisplayTitle =", "== True): for aEntry in aResult[1]: sHosterUrl = aEntry # pour récuperer tous", "oGui.addDir(SITE_IDENTIFIER, SERIE_VFS[1], 'Séries (VF)', 'vf.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFR[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VOSTFR[1],", "sUrl1 or re.match('.+?saison [0-9]+', sTitle, re.IGNORECASE): oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler)", "oGui.setEndOfDirectory() 
def showHosters(sLoop = False): oGui = cGui() oInputParameterHandler = cInputParameterHandler() sUrl =", "sHosterUrl: sHosterUrl = sHosterUrl.split('&url=')[1] # pour récuperer le lien jwplayer(GoogleDrive) if 'filmhdstream' in", "= cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) oGui.addMisc(SITE_IDENTIFIER, 'serieHosters', sDisplayTitle, '', sThumb,", ") liste.append( ['Drame', URL_MAIN + 'category/films/drame/'] ) liste.append( ['Espionnage', URL_MAIN + 'category/films/espionnage/'] )", "cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VFS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VFS[1], 'Séries (VF)', 'vf.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl',", "la qualité sTitle = re.sub('\\[\\w+]', '', sTitle) sTitle = re.sub('\\[\\w+ \\w+]', '', sTitle)", ") liste.append( ['BLURAY 1080p/720p', URL_MAIN + 'category/films/bluray-1080p-720p/'] ) liste.append( ['BLURAY 3D', URL_MAIN +", "= 'https://streamingk.net/' MOVIE_NEWS = (URL_MAIN + 'category/films/', 'showMovies') MOVIE_MOVIE = (URL_MAIN + 'category/films/',", "liste = [] liste.append( ['0-9', URL_MAIN + 'category/series-tv/0-9/'] ) liste.append( ['A-B-C', URL_MAIN +", "récuperer le dernier episode sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\">New</span><b> </b>', '') sHtmlContent =", "['Action', URL_MAIN + 'category/films/action/'] ) liste.append( ['Animation', URL_MAIN + 'category/films/animation/'] ) liste.append( ['Arts", "# clone de dpstreaming.tv SITE_IDENTIFIER = 'streamingk_com' SITE_NAME = 'StreamingK' SITE_DESC = 'Films,", "return def showGenres(): oGui = cGui() liste = [] liste.append( ['Action', URL_MAIN +", "URL_MAIN + 'category/films/bluray-3d/'] ) liste.append( ['Emissions TV', URL_MAIN + 'category/emissions-tv/'] ) for 
sTitle,", "oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) else: oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False):", "URL_SEARCH_SERIES = (URL_MAIN + '?s=', 'showMovies') FUNCTION_SEARCH = 'showMovies' def load(): oGui =", "= oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() sHtmlContent = sHtmlContent.decode('utf-8', \"replace\") sHtmlContent", "sDesc = aResult[1][0] sDesc = sDesc.replace('&#8217;', '\\'').replace('&#8230;', '...') except: pass sPattern = '<span", "in aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break if aEntry[0]: # stream ou telechargement", "(URL_MAIN + 'category/emissions-tv/', 'showMovies') REPLAYTV_REPLAYTV = (URL_MAIN + 'category/emissions-tv/', 'showMovies') URL_SEARCH = (URL_MAIN", "#ff9900;\">') oParser = cParser() sPattern = '<strong><span style=\"color: #ff9900;\">([^<]+)<|<a class=\"large button.+?\" href=\"([^<>\"]+?)\" target=\"(?:_blank|vid)\"'", "-*- coding: utf-8 -*- # Vstream https://github.com/Kodi-vStream/venom-xbmc-addons from resources.lib.gui.hoster import cHosterGui from resources.lib.gui.gui", "les liens et pour récuperer le dernier episode sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\">New</span><b>", "cOutputParameterHandler from resources.lib.handler.requestHandler import cRequestHandler from resources.lib.parser import cParser from resources.lib.util import cUtil", "cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', REPLAYTV_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, REPLAYTV_NEWS[1], 'Emissions TV', 'replay.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMoviesSearch(): oGui", "sHosterUrl.split('&url=')[1] # pour récuperer le lien jwplayer(GoogleDrive) if 'filmhdstream' in sHosterUrl: oRequestHandler =", "oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): 
oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb)", "for aEntry in aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break # Si recherche et", "if (aResult[0] == True): for aEntry in aResult[1]: sHosterUrl = aEntry oHoster =", "Saisons et episodes sUrl = aEntry[2] SXXEX = re.search('>(S[0-9]{2}E[0-9]{2})<', sUrl) HOST = re.search('a", "'category/films/western/'] ) liste.append( ['VOSTFR', URL_MAIN + 'category/films/vostfr-films/'] ) liste.append( ['BLURAY 1080p/720p', URL_MAIN +", "def showSeries(sLoop = False): oGui = cGui() oParser = cParser() oInputParameterHandler = cInputParameterHandler()", "'az.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMovies(sSearch = ''): oGui = cGui() if sSearch: sUrl", "cParser() aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == False): oGui.addText(SITE_IDENTIFIER) if (aResult[0] ==", "+ '[/COLOR]' else: sTitle = sMovieTitle + ' ' + aEntry[1].replace(' New', '')", "pass elif 'series' in sUrl1 or re.match('.+?saison [0-9]+', sTitle, re.IGNORECASE): oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle,", "href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\".+?<p>([^<]+)</p>' oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Magouille pour virer les 3", "sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) elif 'mangas' in sUrl: oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '',", "'category/series-tv/g-h-i/'] ) liste.append( ['J-K-L', URL_MAIN + 'category/series-tv/j-k-l/'] ) liste.append( ['M-N-O', URL_MAIN + 'category/series-tv/m-n-o/']", "import cOutputParameterHandler from resources.lib.handler.requestHandler import cRequestHandler from resources.lib.parser import cParser from resources.lib.util import", "aEntry[1] if sSearch: sDesc = '' else: sDesc = aEntry[3].replace('[&hellip;]', '').replace('&hellip;', 
'...').replace('&rsquo;', '\\'').replace('&#8217;',", "['Western', URL_MAIN + 'category/films/western/'] ) liste.append( ['VOSTFR', URL_MAIN + 'category/films/vostfr-films/'] ) liste.append( ['BLURAY", "center;\">([^<]+)</p><p style=\"text-align: center;\">' aResult = oParser.parse(sHtmlContent, sPattern) if aResult[0]: sDesc = aResult[1][0] sDesc", "cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl',", "'showMovies') FUNCTION_SEARCH = 'showMovies' def load(): oGui = cGui() oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl',", "False): oGui.addText(SITE_IDENTIFIER) if (aResult[0] == True): total = len(aResult[1]) progress_ = progress().VScreate(SITE_NAME) for", "= False): oGui = cGui() oParser = cParser() oInputParameterHandler = cInputParameterHandler() sUrl =", "if (aResult[0] == True): for aEntry in aResult[1]: sHosterUrl = aEntry # pour", "sSearchText showMovies(sUrl) oGui.setEndOfDirectory() return def showGenres(): oGui = cGui() liste = [] liste.append(", "sDesc = aEntry[3].replace('[&hellip;]', '').replace('&hellip;', '...').replace('&rsquo;', '\\'').replace('&#8217;', '\\'').replace('&#8230;', '...') oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl1)", "['A-B-C', URL_MAIN + 'category/series-tv/a-b-c/'] ) liste.append( ['D-E-F', URL_MAIN + 'category/series-tv/d-e-f/'] ) liste.append( ['G-H-I',", "3D', URL_MAIN + 'category/films/bluray-3d/'] ) liste.append( ['Emissions TV', URL_MAIN + 'category/emissions-tv/'] ) for", "sHtmlContent.decode('utf-8', \"replace\") sHtmlContent = unicodedata.normalize('NFD', sHtmlContent).encode('ascii', 'ignore').decode('unicode_escape') # vire accent et '\\' sHtmlContent", "TV', URL_MAIN + 
'category/emissions-tv/'] ) for sTitle, sUrl in liste: oOutputParameterHandler = cOutputParameterHandler()", "= oParser.parse(sUrl, sPattern) if (aResult[0] == True): for aEntry in aResult[1]: sHosterUrl =", "try: sPattern = '</p><p style=\"text-align: center;\">([^<]+)</p><p style=\"text-align: center;\">' aResult = oParser.parse(sHtmlContent, sPattern) if", "MOVIE_VOSTFR = (URL_MAIN + 'category/films/vostfr-films/', 'showMovies') MOVIE_GENRES = (True, 'showGenres') SERIE_SERIES = (URL_MAIN", "# Magouille pour virer les 3 ligne en trop en cas de recherche", "= sTitle.replace('-streaming-telecharger', '').replace('-', ' ') sTitle = sTitle.replace(' [Streaming]', '') sTitle = sTitle.replace('", "[] liste.append( ['0-9', URL_MAIN + 'category/series-tv/0-9/'] ) liste.append( ['A-B-C', URL_MAIN + 'category/series-tv/a-b-c/'] )", "['M-N-O', URL_MAIN + 'category/series-tv/m-n-o/'] ) liste.append( ['P-Q-R', URL_MAIN + 'category/series-tv/p-q-r/'] ) liste.append( ['S-T-U',", "liste.append( ['Biopic', URL_MAIN + 'category/films/biopic/'] ) liste.append( ['Comédie', URL_MAIN + 'category/films/comedie/'] ) liste.append(", "= sHosterUrl.split('&url=')[1] # pour récuperer le lien jwplayer(GoogleDrive) if 'filmhdstream' in sHosterUrl: oRequestHandler", "et pour récuperer le dernier episode sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\">New</span><b> </b>', '')", "+ 'category/series-tv/j-k-l/'] ) liste.append( ['M-N-O', URL_MAIN + 'category/series-tv/m-n-o/'] ) liste.append( ['P-Q-R', URL_MAIN +", "+ 'category/emissions-tv/', 'showMovies') URL_SEARCH = (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_MOVIES = (URL_MAIN +", "'category/films/aventure-films/'] ) liste.append( ['Biopic', URL_MAIN + 'category/films/biopic/'] ) liste.append( ['Comédie', URL_MAIN + 'category/films/comedie/']", "sHtmlContent pour prendre les liens et pour récuperer le dernier episode sHtmlContent =", "[0-9]+', sTitle, re.IGNORECASE): oGui.addTV(SITE_IDENTIFIER, 'showSeries', 
sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) elif 'mangas' in", "+ 'category/emissions-tv/'] ) for sTitle, sUrl in liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl)", "'showMovies') MOVIE_GENRES = (True, 'showGenres') SERIE_SERIES = (URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_NEWS =", "+ 'category/films/animation/'] ) liste.append( ['Arts Martiaux', URL_MAIN + 'category/films/arts-martiaux/'] ) liste.append( ['Aventure', URL_MAIN", "['J-K-L', URL_MAIN + 'category/series-tv/j-k-l/'] ) liste.append( ['M-N-O', URL_MAIN + 'category/series-tv/m-n-o/'] ) liste.append( ['P-Q-R',", "+ 'category/series-tv/m-n-o/'] ) liste.append( ['P-Q-R', URL_MAIN + 'category/series-tv/p-q-r/'] ) liste.append( ['S-T-U', URL_MAIN +", "oOutputParameterHandler) oGui.setEndOfDirectory() def showList(): oGui = cGui() liste = [] liste.append( ['0-9', URL_MAIN", "(URL_MAIN + 'category/films/vostfr-films/', 'showMovies') MOVIE_GENRES = (True, 'showGenres') SERIE_SERIES = (URL_MAIN + 'category/series-tv/',", "# Vstream https://github.com/Kodi-vStream/venom-xbmc-addons from resources.lib.gui.hoster import cHosterGui from resources.lib.gui.gui import cGui from resources.lib.handler.inputParameterHandler", "sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() #", "liste.append( ['Horreur', URL_MAIN + 'category/films/horreur/'] ) liste.append( ['Musical', URL_MAIN + 'category/films/musical/'] ) liste.append(", "sHosterUrl, sThumb) # pour récuperer les liens jheberg elif 'jheberg' in sHosterUrl: aResult", "= (URL_MAIN + '?s=', 'showMovies') FUNCTION_SEARCH = 'showMovies' def load(): oGui = cGui()", "(Genres)', 'genres.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0]) 
oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries (Derniers ajouts)',", "'<a href') sHtmlContent = sHtmlContent.replace('https://cut-urls.com/st?api=d6e46f2fcd4bfed906a9f3ecbbb6830e862b3afb&amp;url=', '') # récupération du Synopsis sDesc = ''", "oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler)", "+ SXXEX.group(1) if HOST: HOST = HOST.group(1).split('/')[0] sDisplayTitle = sTitle + ' [COLOR", "sHosterUrl = aEntry # pour récuperer tous les liens if '&url=' in sHosterUrl:", "oOutputParameterHandler) oGui.setEndOfDirectory() def showMovies(sSearch = ''): oGui = cGui() if sSearch: sUrl =", "button.+?\" href=\"([^<>\"]+?)\" target=\"(?:_blank|vid)\"' aResult = oParser.parse(sHtmlContent, sPattern) # Si il y a rien", "elif 'mangas' in sUrl: oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) else: oGui.addMovie(SITE_IDENTIFIER,", "showMoviesSearch(): oGui = cGui() sSearchText = oGui.showKeyBoard() if (sSearchText != False): sUrl =", "if (len(aResult) == 0) and (sLoop == False): # oGui.setEndOfDirectory() showSeries(True) return if", "retire la qualité sTitle = re.sub('\\[\\w+]', '', sTitle) sTitle = re.sub('\\[\\w+ \\w+]', '',", "sPattern = '</p><p style=\"text-align: center;\">([^<]+)</p><p style=\"text-align: center;\">' aResult = oParser.parse(sHtmlContent, sPattern) if aResult[0]:", "sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\"' else: oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sPattern", "sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\">New</span><b> </b>', '') sHtmlContent = sHtmlContent.replace('<b> </b>', ' ')", "oInputParameterHandler.getValue('sMovieTitle') sThumb 
= oInputParameterHandler.getValue('sThumb') sPattern = 'href=\"([^\"]+)\"' oParser = cParser() aResult = oParser.parse(sUrl,", "aEntry[2]) == 0: continue sUrl1 = aEntry[0] sTitle = aEntry[2].replace('Saiosn', 'Saison') if 'Brouillon'", "showList(): oGui = cGui() liste = [] liste.append( ['0-9', URL_MAIN + 'category/series-tv/0-9/'] )", "showSeries(sLoop = False): oGui = cGui() oParser = cParser() oInputParameterHandler = cInputParameterHandler() sUrl", "cParser() aResult = oParser.parse(sUrl, sPattern) if (aResult[0] == True): for aEntry in aResult[1]:", "oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Lettres [COLOR coral]' + sTitle +", "sDesc.replace('&#8217;', '\\'').replace('&#8230;', '...') except: pass sPattern = '<span style=\"color: #33cccc;[^<>\"]*\">(?:<(?:strong|b)>)((?:Stream|Telec)[^<>]+)|\"center\">(.pisode[^<]{2,12})*<(?!\\/a>)([^<>]*a href=\"http.+?)(?:<.p>|<br|<.div)' aResult =", "aEntry in aResult[1]: if aEntry[0]: oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + '[/COLOR]') else:", "sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showList(): oGui = cGui() liste", "'Emissions TV', 'replay.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMoviesSearch(): oGui = cGui() sSearchText = oGui.showKeyBoard()", "if '-filmographie-streaming' in aEntry[1]: pass elif 'quelle-est-votre-serie-preferee' in aEntry[1]: pass elif 'series' in", "URL_MAIN + 'category/films/comedie-dramatique/'] ) liste.append( ['Documentaire', URL_MAIN + 'category/documentaire/'] ) liste.append( ['Drame', URL_MAIN", "total) if progress_.iscanceled(): break # Si recherche et trop de resultat, on nettoye", "'category/series-tv/p-q-r/'] ) liste.append( ['S-T-U', URL_MAIN + 'category/series-tv/s-t-u/'] ) liste.append( ['V-W-X-Y-Z', URL_MAIN + 'category/series-tv/v-w-x-y-z/']", "= cParser() 
oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb =", "sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') sPattern = 'href=\"([^\"]+)\"' oParser = cParser() aResult", "URL_SEARCH = (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_MOVIES = (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_SERIES", "(URL_MAIN + '?s=', 'showMovies') URL_SEARCH_SERIES = (URL_MAIN + '?s=', 'showMovies') FUNCTION_SEARCH = 'showMovies'", "#ff9900;\">([^<]+)<|<a class=\"large button.+?\" href=\"([^<>\"]+?)\" target=\"(?:_blank|vid)\"' aResult = oParser.parse(sHtmlContent, sPattern) # Si il y", "!= False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) else: oHoster = cHosterGui().checkHoster(sHosterUrl) if", "+ '?s=', 'showMovies') URL_SEARCH_MOVIES = (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_SERIES = (URL_MAIN +", "aResult[1]: if aEntry[0]: oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + '[/COLOR]') else: sHosterUrl =", "coral]' + HOST.capitalize() + '[/COLOR]' else: sTitle = sMovieTitle + ' ' +", "import cParser from resources.lib.util import cUtil from resources.lib.comaddon import progress #, VSlog from", "cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\".+?<p>([^<]+)</p>' oRequestHandler = cRequestHandler(sUrl) sHtmlContent", "= cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showList(): oGui", "# astuce en cas d'episode unique # if (aResult[0] == False) and (sLoop", "= cOutputParameterHandler() 
oOutputParameterHandler.addParameter('siteUrl', SERIE_LIST[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_LIST[1], 'Séries (Liste)', 'listes.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler()", "en trop en cas de recherche sHtmlContent = sHtmlContent.replace('quelle-est-votre-serie-preferee', '<>') sHtmlContent = sHtmlContent.replace('top-series-du-moment',", "= '' else: sDesc = aEntry[3].replace('[&hellip;]', '').replace('&hellip;', '...').replace('&rsquo;', '\\'').replace('&#8217;', '\\'').replace('&#8230;', '...') oOutputParameterHandler =", "= [] liste.append( ['Action', URL_MAIN + 'category/films/action/'] ) liste.append( ['Animation', URL_MAIN + 'category/films/animation/']", "(URL_MAIN + 'category/films/', 'showMovies') MOVIE_MOVIE = (URL_MAIN + 'category/films/', 'showMovies') MOVIE_VOSTFR = (URL_MAIN", "= 'showMovies' def load(): oGui = cGui() oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', 'http://venom/') oGui.addDir(SITE_IDENTIFIER,", "URL_MAIN + 'category/films/famille/'] ) liste.append( ['Fantastique', URL_MAIN + 'category/films/fantastique/'] ) liste.append( ['Guerre', URL_MAIN", "cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Réécriture de sHtmlContent pour récuperer la qualité sHtmlContent", "'showMovies', 'Lettres [COLOR coral]' + sTitle + '[/COLOR]', 'az.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMovies(sSearch", "et '\\' sHtmlContent = sHtmlContent.encode('utf-8') # On remet en utf-8 # Réécriture de", ") liste.append( ['Spectacle', URL_MAIN + 'category/films/spectacle/'] ) liste.append( ['Thriller', URL_MAIN + 'category/films/thriller/'] )", "'Films, Séries & Mangas en streaming. Tout les meilleurs streaming en illimité.' 
URL_MAIN", "from resources.lib.handler.inputParameterHandler import cInputParameterHandler from resources.lib.handler.outputParameterHandler import cOutputParameterHandler from resources.lib.handler.requestHandler import cRequestHandler from", "'category/films/spectacle/'] ) liste.append( ['Thriller', URL_MAIN + 'category/films/thriller/'] ) liste.append( ['Western', URL_MAIN + 'category/films/western/']", "sSearch.replace(' ', '+') sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\"' else: oInputParameterHandler = cInputParameterHandler() sUrl", ") liste.append( ['Biopic', URL_MAIN + 'category/films/biopic/'] ) liste.append( ['Comédie', URL_MAIN + 'category/films/comedie/'] )", "sHtmlContent.replace('https://cut-urls.com/st?api=d6e46f2fcd4bfed906a9f3ecbbb6830e862b3afb&amp;url=', '') # récupération du Synopsis sDesc = '' try: sPattern = '</p><p", "'category/films/', 'showMovies') MOVIE_MOVIE = (URL_MAIN + 'category/films/', 'showMovies') MOVIE_VOSTFR = (URL_MAIN + 'category/films/vostfr-films/',", "recherche et trop de resultat, on nettoye if sSearch and total > 2:", "SERIE_LIST = (True, 'showList') SERIE_VFS = (URL_MAIN + 'category/series-tv/series-streaming-vf/', 'showMovies') SERIE_VOSTFR = (URL_MAIN", "= 'Films, Séries & Mangas en streaming. 
Tout les meilleurs streaming en illimité.'", "+ 'category/films/guerre/'] ) liste.append( ['Historique', URL_MAIN + 'category/films/historique/'] ) liste.append( ['Horreur', URL_MAIN +", "accent et '\\' sHtmlContent = sHtmlContent.encode('utf-8') # On remet en utf-8 # Réécriture", "pour récuperer le dernier episode sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\">New</span><b> </b>', '') sHtmlContent", "oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', REPLAYTV_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, REPLAYTV_NEWS[1], 'Emissions TV', 'replay.png', oOutputParameterHandler) oGui.setEndOfDirectory() def", "+ 'category/films/famille/'] ) liste.append( ['Fantastique', URL_MAIN + 'category/films/fantastique/'] ) liste.append( ['Guerre', URL_MAIN +", ") liste.append( ['Thriller', URL_MAIN + 'category/films/thriller/'] ) liste.append( ['Western', URL_MAIN + 'category/films/western/'] )", "aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl,", "'showMovies') SERIE_LIST = (True, 'showList') SERIE_VFS = (URL_MAIN + 'category/series-tv/series-streaming-vf/', 'showMovies') SERIE_VOSTFR =", "= (URL_MAIN + 'category/emissions-tv/', 'showMovies') REPLAYTV_REPLAYTV = (URL_MAIN + 'category/emissions-tv/', 'showMovies') URL_SEARCH =", "= aEntry[2].replace('Saiosn', 'Saison') if 'Brouillon' in sTitle: sTitle = sUrl1.rsplit('/', 2)[1] sTitle =", "[Telecharger]', '').replace(' [Telechargement]', '') sDisplayTitle = sTitle # on retire la qualité sTitle", "streaming. Tout les meilleurs streaming en illimité.' 
URL_MAIN = 'https://streamingk.net/' MOVIE_NEWS = (URL_MAIN", "= oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent =", "liste.append( ['Arts Martiaux', URL_MAIN + 'category/films/arts-martiaux/'] ) liste.append( ['Aventure', URL_MAIN + 'category/films/aventure-films/'] )", "URL_MAIN + 'category/films/spectacle/'] ) liste.append( ['Thriller', URL_MAIN + 'category/films/thriller/'] ) liste.append( ['Western', URL_MAIN", "sUrl in liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Lettres [COLOR coral]'", "'</p><p style=\"text-align: center;\">([^<]+)</p><p style=\"text-align: center;\">' aResult = oParser.parse(sHtmlContent, sPattern) if aResult[0]: sDesc =", "'category/films/guerre/'] ) liste.append( ['Historique', URL_MAIN + 'category/films/historique/'] ) liste.append( ['Horreur', URL_MAIN + 'category/films/horreur/']", "sTitle = re.sub(' - Saison \\d+', '', sMovieTitle) + ' ' + SXXEX.group(1)", "# oGui.setEndOfDirectory() showSeries(True) return if (aResult[0] == True): for aEntry in aResult[1]: if", "load(): oGui = cGui() oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', 'http://venom/') oGui.addDir(SITE_IDENTIFIER, 'showMoviesSearch', 'Recherche', 'search.png',", "cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0], ''), aEntry[2]) == 0: continue sUrl1 = aEntry[0] sTitle = aEntry[2].replace('Saiosn', 'Saison')", "= re.sub('\\[\\w+]', '', sTitle) sTitle = re.sub('\\[\\w+ \\w+]', '', sTitle) sThumb = aEntry[1]", "cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) # pour récuperer les liens jheberg elif 'jheberg' in", "= (URL_MAIN + 'category/series-tv/series-streaming-vf/', 'showMovies') SERIE_VOSTFR = (URL_MAIN + 
'category/series-tv/series-streaming-vostfr/', 'showMovies') REPLAYTV_NEWS =", "oGui.addDir(SITE_IDENTIFIER, SERIE_VOSTFR[1], 'Séries (VOSTFR)', 'vostfr.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', REPLAYTV_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, REPLAYTV_NEWS[1],", "cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_LIST[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_LIST[1], 'Séries (Liste)', 'listes.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl',", "trop de resultat, on nettoye if sSearch and total > 2: if cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0],", "oGui.setEndOfDirectory() def showMovies(sSearch = ''): oGui = cGui() if sSearch: sUrl = sSearch.replace('", "clone de dpstreaming.tv SITE_IDENTIFIER = 'streamingk_com' SITE_NAME = 'StreamingK' SITE_DESC = 'Films, Séries", "(VF)', 'vf.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFR[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VOSTFR[1], 'Séries (VOSTFR)', 'vostfr.png',", "resources.lib.multihost import cJheberg import re, unicodedata # clone de dpstreaming.tv SITE_IDENTIFIER = 'streamingk_com'", "cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() sHtmlContent = sHtmlContent.decode('utf-8', \"replace\") sHtmlContent = unicodedata.normalize('NFD', sHtmlContent).encode('ascii', 'ignore').decode('unicode_escape')", "# #oGui.setEndOfDirectory() # serieHosters(True) # return if (aResult[0] == True): total = len(aResult[1])", "a afficher c'est peut etre une serie if (len(aResult) == 0) and (sLoop", "from resources.lib.multihost import cJheberg import re, unicodedata # clone de dpstreaming.tv SITE_IDENTIFIER =", "oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0]) 
oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler", "oRequestHandler.request() # Réécriture de sHtmlContent pour récuperer la qualité sHtmlContent = sHtmlContent.replace('<span style=\"color:", "URL_MAIN + 'category/series-tv/p-q-r/'] ) liste.append( ['S-T-U', URL_MAIN + 'category/series-tv/s-t-u/'] ) liste.append( ['V-W-X-Y-Z', URL_MAIN", "cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showList(): oGui =", "'mangas' in sUrl: oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) else: oGui.addMovie(SITE_IDENTIFIER, 'showHosters',", "URL_SEARCH[0] + sSearchText showMovies(sUrl) oGui.setEndOfDirectory() return def showGenres(): oGui = cGui() liste =", "['Musical', URL_MAIN + 'category/films/musical/'] ) liste.append( ['Policier', URL_MAIN + 'category/films/policier/'] ) liste.append( ['Romance',", "du Synopsis sDesc = '' try: sPattern = '</p><p style=\"text-align: center;\">([^<]+)</p><p style=\"text-align: center;\">'", "0: continue sUrl1 = aEntry[0] sTitle = aEntry[2].replace('Saiosn', 'Saison') if 'Brouillon' in sTitle:", "'\\'').replace('&#8230;', '...') except: pass sPattern = '<span style=\"color: #33cccc;[^<>\"]*\">(?:<(?:strong|b)>)((?:Stream|Telec)[^<>]+)|\"center\">(.pisode[^<]{2,12})*<(?!\\/a>)([^<>]*a href=\"http.+?)(?:<.p>|<br|<.div)' aResult = oParser.parse(sHtmlContent,", "+ 'category/films/musical/'] ) liste.append( ['Policier', URL_MAIN + 'category/films/policier/'] ) liste.append( ['Romance', URL_MAIN +", "'Lettres [COLOR coral]' + sTitle + '[/COLOR]', 'az.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMovies(sSearch =", "+ 'category/series-tv/', 'showMovies') SERIE_LIST = (True, 'showList') SERIE_VFS = (URL_MAIN + 'category/series-tv/series-streaming-vf/', 
'showMovies')", "nettoye if sSearch and total > 2: if cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0], ''), aEntry[2]) == 0:", "'', sTitle) sTitle = re.sub('\\[\\w+ \\w+]', '', sTitle) sThumb = aEntry[1] if sSearch:", "from resources.lib.gui.gui import cGui from resources.lib.handler.inputParameterHandler import cInputParameterHandler from resources.lib.handler.outputParameterHandler import cOutputParameterHandler from", "+ '?s=', 'showMovies') FUNCTION_SEARCH = 'showMovies' def load(): oGui = cGui() oOutputParameterHandler =", "'http://venom/') oGui.addDir(SITE_IDENTIFIER, 'showMoviesSearch', 'Recherche', 'search.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1],", "= oParser.parse(sHtmlContent, sPattern) if aResult[0]: sDesc = aResult[1][0] sDesc = sDesc.replace('&#8217;', '\\'').replace('&#8230;', '...')", "'category/emissions-tv/', 'showMovies') REPLAYTV_REPLAYTV = (URL_MAIN + 'category/emissions-tv/', 'showMovies') URL_SEARCH = (URL_MAIN + '?s=',", "class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\".+?<p>([^<]+)</p>' oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Magouille pour virer les", "aEntry[1]: pass elif 'quelle-est-votre-serie-preferee' in aEntry[1]: pass elif 'series' in sUrl1 or re.match('.+?saison", "sMovieTitle + ' ' + aEntry[1].replace(' New', '') sDisplayTitle = sTitle oOutputParameterHandler =", "jheberg elif 'jheberg' in sHosterUrl: aResult = cJheberg().GetUrls(sHosterUrl) if aResult: for aEntry in", "resources.lib.handler.outputParameterHandler import cOutputParameterHandler from resources.lib.handler.requestHandler import cRequestHandler from resources.lib.parser import cParser from resources.lib.util", "aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == False): oGui.addText(SITE_IDENTIFIER) if 
(aResult[0] == True):", "récuperer la qualité sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\"><strong>', '<strong><span style=\"color: #ff9900;\">') oParser =", "' + aEntry[1].replace(' New', '') sDisplayTitle = sTitle oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl)", "serie if (len(aResult) == 0) and (sLoop == False): # oGui.setEndOfDirectory() showSeries(True) return", "if aResult: for aEntry in aResult: sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if", "qualité sTitle = re.sub('\\[\\w+]', '', sTitle) sTitle = re.sub('\\[\\w+ \\w+]', '', sTitle) sThumb", ") liste.append( ['A-B-C', URL_MAIN + 'category/series-tv/a-b-c/'] ) liste.append( ['D-E-F', URL_MAIN + 'category/series-tv/d-e-f/'] )", "(oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) # pour récuperer les", "if progress_.iscanceled(): break if aEntry[0]: # stream ou telechargement oGui.addText(SITE_IDENTIFIER, '[COLOR red]' +", "liste.append( ['Espionnage', URL_MAIN + 'category/films/espionnage/'] ) liste.append( ['Famille', URL_MAIN + 'category/films/famille/'] ) liste.append(", "sPattern) if (aResult[0] == True): for aEntry in aResult[1]: sHosterUrl = aEntry #", "'listes.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VFS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VFS[1], 'Séries (VF)', 'vf.png', oOutputParameterHandler)", "import progress #, VSlog from resources.lib.multihost import cJheberg import re, unicodedata # clone", "or re.match('.+?saison [0-9]+', sTitle, re.IGNORECASE): oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) elif", "+ '[/COLOR]') else: # Saisons et episodes sUrl = aEntry[2] SXXEX = re.search('>(S[0-9]{2}E[0-9]{2})<',", "(URL_MAIN + '?s=', 'showMovies') 
URL_SEARCH_MOVIES = (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_SERIES = (URL_MAIN", "liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler) oGui.setEndOfDirectory() def", "sPattern = '<a class=\"next page-numbers\" href=\"([^\"]+)\"' oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern)", "!= False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) # pour récuperer les liens", "liste.append( ['M-N-O', URL_MAIN + 'category/series-tv/m-n-o/'] ) liste.append( ['P-Q-R', URL_MAIN + 'category/series-tv/p-q-r/'] ) liste.append(", "['BLURAY 3D', URL_MAIN + 'category/films/bluray-3d/'] ) liste.append( ['Emissions TV', URL_MAIN + 'category/emissions-tv/'] )", "-*- # Vstream https://github.com/Kodi-vStream/venom-xbmc-addons from resources.lib.gui.hoster import cHosterGui from resources.lib.gui.gui import cGui from", "sSearch: sNextPage = __checkForNextPage(sHtmlContent) if (sNextPage != False): oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sNextPage)", "oOutputParameterHandler.addParameter('siteUrl', sNextPage) oGui.addNext(SITE_IDENTIFIER, 'showMovies', '[COLOR teal]Suivant >>>[/COLOR]', oOutputParameterHandler) oGui.setEndOfDirectory() def __checkForNextPage(sHtmlContent): sPattern =", "page-numbers\" href=\"([^\"]+)\"' oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True):", "= re.sub(' - Saison \\d+', '', sMovieTitle) + ' ' + SXXEX.group(1) if", "+ '?s=', 'showMovies') URL_SEARCH_SERIES = (URL_MAIN + '?s=', 'showMovies') FUNCTION_SEARCH = 'showMovies' def", "= 'href=\"([^\"]+)\"' oParser = cParser() aResult = oParser.parse(sUrl, sPattern) if (aResult[0] == True):", "aEntry[2] SXXEX = re.search('>(S[0-9]{2}E[0-9]{2})<', sUrl) HOST = re.search('a 
href=\"https*:\\/\\/([^.]+)', sUrl) if SXXEX: #", "URL_SEARCH_MOVIES = (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_SERIES = (URL_MAIN + '?s=', 'showMovies') FUNCTION_SEARCH", "return aResult[1][0] return False def showSeries(sLoop = False): oGui = cGui() oParser =", "1080p/720p', URL_MAIN + 'category/films/bluray-1080p-720p/'] ) liste.append( ['BLURAY 3D', URL_MAIN + 'category/films/bluray-3d/'] ) liste.append(", "import re, unicodedata # clone de dpstreaming.tv SITE_IDENTIFIER = 'streamingk_com' SITE_NAME = 'StreamingK'", "liste.append( ['S-T-U', URL_MAIN + 'category/series-tv/s-t-u/'] ) liste.append( ['V-W-X-Y-Z', URL_MAIN + 'category/series-tv/v-w-x-y-z/'] ) for", "liste.append( ['Famille', URL_MAIN + 'category/films/famille/'] ) liste.append( ['Fantastique', URL_MAIN + 'category/films/fantastique/'] ) liste.append(", "URL_MAIN + 'category/films/action/'] ) liste.append( ['Animation', URL_MAIN + 'category/films/animation/'] ) liste.append( ['Arts Martiaux',", "= cRequestHandler(sHosterUrl) sHtmlContent = oRequestHandler.request() sPattern = '<iframe.+?src=\"([^\"]+)\"' aResult = oParser.parse(sHtmlContent, sPattern) if", "sTitle, sUrl in liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Lettres [COLOR", "= cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\".+?<p>([^<]+)</p>' oRequestHandler = cRequestHandler(sUrl)", "'category/films/biopic/'] ) liste.append( ['Comédie', URL_MAIN + 'category/films/comedie/'] ) liste.append( ['Comédie Dramatique', URL_MAIN +", "(aResult[0] == True): for aEntry in aResult[1]: sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl)", "sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) if not sSearch: sNextPage = __checkForNextPage(sHtmlContent) if 
(sNextPage !=", "SITE_IDENTIFIER = 'streamingk_com' SITE_NAME = 'StreamingK' SITE_DESC = 'Films, Séries & Mangas en", "sTitle = sTitle.replace('-streaming-telecharger', '').replace('-', ' ') sTitle = sTitle.replace(' [Streaming]', '') sTitle =", "'category/films/bluray-3d/'] ) liste.append( ['Emissions TV', URL_MAIN + 'category/emissions-tv/'] ) for sTitle, sUrl in", "False) and (sLoop == False): # #oGui.setEndOfDirectory() # serieHosters(True) # return if (aResult[0]", "\"replace\") sHtmlContent = unicodedata.normalize('NFD', sHtmlContent).encode('ascii', 'ignore').decode('unicode_escape') # vire accent et '\\' sHtmlContent =", "+ '[/COLOR]') else: sHosterUrl = aEntry[1] # pour récuperer tous les liens if", "oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl',", "oGui.setEndOfDirectory() return def showGenres(): oGui = cGui() liste = [] liste.append( ['Action', URL_MAIN", "in aEntry[1]: pass elif 'quelle-est-votre-serie-preferee' in aEntry[1]: pass elif 'series' in sUrl1 or", "sHtmlContent = oRequestHandler.request() sHtmlContent = sHtmlContent.decode('utf-8', \"replace\") sHtmlContent = unicodedata.normalize('NFD', sHtmlContent).encode('ascii', 'ignore').decode('unicode_escape') #", "et episodes sUrl = aEntry[2] SXXEX = re.search('>(S[0-9]{2}E[0-9]{2})<', sUrl) HOST = re.search('a href=\"https*:\\/\\/([^.]+)',", "'\\'').replace('&#8230;', '...') oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl1) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) if '-filmographie-streaming'", "(sSearchText != False): sUrl = URL_SEARCH[0] + sSearchText showMovies(sUrl) oGui.setEndOfDirectory() return def showGenres():", "= cOutputParameterHandler() 
oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler()", "- Saison \\d+', '', sMovieTitle) + ' ' + SXXEX.group(1) if HOST: HOST", "\\d+', '', sMovieTitle) + ' ' + SXXEX.group(1) if HOST: HOST = HOST.group(1).split('/')[0]", "+ aEntry[0] + '[/COLOR]') else: # Saisons et episodes sUrl = aEntry[2] SXXEX", "from resources.lib.parser import cParser from resources.lib.util import cUtil from resources.lib.comaddon import progress #,", "oOutputParameterHandler.addParameter('siteUrl', 'http://venom/') oGui.addDir(SITE_IDENTIFIER, 'showMoviesSearch', 'Recherche', 'search.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER,", "['Biopic', URL_MAIN + 'category/films/biopic/'] ) liste.append( ['Comédie', URL_MAIN + 'category/films/comedie/'] ) liste.append( ['Comédie", "oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showList(): oGui = cGui()", "utf-8 -*- # Vstream https://github.com/Kodi-vStream/venom-xbmc-addons from resources.lib.gui.hoster import cHosterGui from resources.lib.gui.gui import cGui", "SITE_NAME = 'StreamingK' SITE_DESC = 'Films, Séries & Mangas en streaming. 
Tout les", "= aEntry[2] SXXEX = re.search('>(S[0-9]{2}E[0-9]{2})<', sUrl) HOST = re.search('a href=\"https*:\\/\\/([^.]+)', sUrl) if SXXEX:", "oParser = cParser() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb", "'').replace('-', ' ') sTitle = sTitle.replace(' [Streaming]', '') sTitle = sTitle.replace(' [Telecharger]', '').replace('", "= cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') oRequestHandler =", "# Réécriture de sHtmlContent pour prendre les liens et pour récuperer le dernier", "sUrl) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) oGui.addMisc(SITE_IDENTIFIER, 'serieHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_)", "', '+') sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\"' else: oInputParameterHandler = cInputParameterHandler() sUrl =", "oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', REPLAYTV_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, REPLAYTV_NEWS[1], 'Emissions TV', 'replay.png', oOutputParameterHandler) oGui.setEndOfDirectory()", "[COLOR coral]' + sTitle + '[/COLOR]', 'az.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMovies(sSearch = ''):", "sHosterUrl = aEntry[1] # pour récuperer tous les liens if '&url=' in sHosterUrl:", "aResult[1]: sHosterUrl = aEntry # pour récuperer tous les liens if '&url=' in", "recherche sHtmlContent = sHtmlContent.replace('quelle-est-votre-serie-preferee', '<>') sHtmlContent = sHtmlContent.replace('top-series-du-moment', '<>') sHtmlContent = 
sHtmlContent.replace('listes-des-series-annulees-et-renouvelees', '<>')", "oParser = cParser() aResult = oParser.parse(sUrl, sPattern) if (aResult[0] == True): for aEntry", "(Liste)', 'listes.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VFS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VFS[1], 'Séries (VF)', 'vf.png',", "'-filmographie-streaming' in aEntry[1]: pass elif 'quelle-est-votre-serie-preferee' in aEntry[1]: pass elif 'series' in sUrl1", "'replay.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMoviesSearch(): oGui = cGui() sSearchText = oGui.showKeyBoard() if (sSearchText", "'<>') sHtmlContent = sHtmlContent.replace('top-series-du-moment', '<>') sHtmlContent = sHtmlContent.replace('listes-des-series-annulees-et-renouvelees', '<>') oParser = cParser() aResult", "sNextPage) oGui.addNext(SITE_IDENTIFIER, 'showMovies', '[COLOR teal]Suivant >>>[/COLOR]', oOutputParameterHandler) oGui.setEndOfDirectory() def __checkForNextPage(sHtmlContent): sPattern = '<a", "= (URL_MAIN + 'category/films/', 'showMovies') MOVIE_VOSTFR = (URL_MAIN + 'category/films/vostfr-films/', 'showMovies') MOVIE_GENRES =", "liste.append( ['Documentaire', URL_MAIN + 'category/documentaire/'] ) liste.append( ['Drame', URL_MAIN + 'category/films/drame/'] ) liste.append(", "sHtmlContent = sHtmlContent.decode('utf-8', \"replace\") sHtmlContent = unicodedata.normalize('NFD', sHtmlContent).encode('ascii', 'ignore').decode('unicode_escape') # vire accent et", "de recherche sHtmlContent = sHtmlContent.replace('quelle-est-votre-serie-preferee', '<>') sHtmlContent = sHtmlContent.replace('top-series-du-moment', '<>') sHtmlContent = sHtmlContent.replace('listes-des-series-annulees-et-renouvelees',", "total > 2: if cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0], ''), aEntry[2]) == 0: continue sUrl1 = aEntry[0]", "''): oGui = cGui() if sSearch: sUrl = sSearch.replace(' ', '+') sPattern =", 
"['Aventure', URL_MAIN + 'category/films/aventure-films/'] ) liste.append( ['Biopic', URL_MAIN + 'category/films/biopic/'] ) liste.append( ['Comédie',", "if aEntry[0]: oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + '[/COLOR]') else: sHosterUrl = aEntry[1]", "de resultat, on nettoye if sSearch and total > 2: if cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0], ''),", "cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) # pour", "'category/emissions-tv/'] ) for sTitle, sUrl in liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER,", "URL_MAIN + 'category/films/thriller/'] ) liste.append( ['Western', URL_MAIN + 'category/films/western/'] ) liste.append( ['VOSTFR', URL_MAIN", "'[/COLOR]', 'az.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMovies(sSearch = ''): oGui = cGui() if sSearch:", "(True, 'showGenres') SERIE_SERIES = (URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_NEWS = (URL_MAIN + 'category/series-tv/',", "= cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) oGui.setEndOfDirectory()", "prendre les liens et pour récuperer le dernier episode sHtmlContent = sHtmlContent.replace('<span style=\"color:", "'showMovies', '[COLOR teal]Suivant >>>[/COLOR]', oOutputParameterHandler) oGui.setEndOfDirectory() def __checkForNextPage(sHtmlContent): sPattern = '<a class=\"next page-numbers\"", "= re.search('>(S[0-9]{2}E[0-9]{2})<', sUrl) HOST = re.search('a href=\"https*:\\/\\/([^.]+)', sUrl) if SXXEX: # on vire", "= cGui() oParser = cParser() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle =", "False): # 
#oGui.setEndOfDirectory() # serieHosters(True) # return if (aResult[0] == True): total =", "showHosters(sLoop = False): oGui = cGui() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle", "(VOSTFR)', 'vostfr.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', REPLAYTV_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, REPLAYTV_NEWS[1], 'Emissions TV', 'replay.png',", "['Espionnage', URL_MAIN + 'category/films/espionnage/'] ) liste.append( ['Famille', URL_MAIN + 'category/films/famille/'] ) liste.append( ['Fantastique',", "== True): for aEntry in aResult[1]: sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if", "(sLoop == False): # #oGui.setEndOfDirectory() # serieHosters(True) # return if (aResult[0] == True):", "(URL_MAIN + 'category/series-tv/series-streaming-vostfr/', 'showMovies') REPLAYTV_NEWS = (URL_MAIN + 'category/emissions-tv/', 'showMovies') REPLAYTV_REPLAYTV = (URL_MAIN", "progress #, VSlog from resources.lib.multihost import cJheberg import re, unicodedata # clone de", "récuperer le lien jwplayer(GoogleDrive) if 'filmhdstream' in sHosterUrl: oRequestHandler = cRequestHandler(sHosterUrl) sHtmlContent =", "= sHtmlContent.decode('utf-8', \"replace\") sHtmlContent = unicodedata.normalize('NFD', sHtmlContent).encode('ascii', 'ignore').decode('unicode_escape') # vire accent et '\\'", "resources.lib.comaddon import progress #, VSlog from resources.lib.multihost import cJheberg import re, unicodedata #", "'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_LIST[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_LIST[1], 'Séries", "aEntry[0]: # stream ou telechargement oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + '[/COLOR]') else:", "oOutputParameterHandler) oGui.setEndOfDirectory() def 
__checkForNextPage(sHtmlContent): sPattern = '<a class=\"next page-numbers\" href=\"([^\"]+)\"' oParser = cParser()", "sPattern) if (aResult[0] == True): return aResult[1][0] return False def showSeries(sLoop = False):", "oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Réécriture de sHtmlContent pour récuperer", "(URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_LIST = (True, 'showList') SERIE_VFS = (URL_MAIN + 'category/series-tv/series-streaming-vf/',", "+ 'category/series-tv/a-b-c/'] ) liste.append( ['D-E-F', URL_MAIN + 'category/series-tv/d-e-f/'] ) liste.append( ['G-H-I', URL_MAIN +", "sDisplayTitle = sTitle # on retire la qualité sTitle = re.sub('\\[\\w+]', '', sTitle)", "aEntry in aResult: sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False):", "sUrl) HOST = re.search('a href=\"https*:\\/\\/([^.]+)', sUrl) if SXXEX: # on vire le double", "sHosterUrl: oRequestHandler = cRequestHandler(sHosterUrl) sHtmlContent = oRequestHandler.request() sPattern = '<iframe.+?src=\"([^\"]+)\"' aResult = oParser.parse(sHtmlContent,", ") liste.append( ['Policier', URL_MAIN + 'category/films/policier/'] ) liste.append( ['Romance', URL_MAIN + 'category/films/romance/'] )", "'showMovies' def load(): oGui = cGui() oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', 'http://venom/') oGui.addDir(SITE_IDENTIFIER, 'showMoviesSearch',", "MOVIE_NEWS = (URL_MAIN + 'category/films/', 'showMovies') MOVIE_MOVIE = (URL_MAIN + 'category/films/', 'showMovies') MOVIE_VOSTFR", "False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) # pour récuperer les liens jheberg", ") liste.append( ['S-T-U', URL_MAIN + 'category/series-tv/s-t-u/'] ) liste.append( ['V-W-X-Y-Z', URL_MAIN + 'category/series-tv/v-w-x-y-z/'] )", "= aEntry[1] # pour récuperer tous les liens 
if '&url=' in sHosterUrl: sHosterUrl", "sTitle.replace(' [Streaming]', '') sTitle = sTitle.replace(' [Telecharger]', '').replace(' [Telechargement]', '') sDisplayTitle = sTitle", "sHosterUrl = sHosterUrl.split('&url=')[1] # pour récuperer le lien jwplayer(GoogleDrive) if 'filmhdstream' in sHosterUrl:", "oRequestHandler = cRequestHandler(sHosterUrl) sHtmlContent = oRequestHandler.request() sPattern = '<iframe.+?src=\"([^\"]+)\"' aResult = oParser.parse(sHtmlContent, sPattern)", "'showMovies') URL_SEARCH_SERIES = (URL_MAIN + '?s=', 'showMovies') FUNCTION_SEARCH = 'showMovies' def load(): oGui", "resources.lib.handler.requestHandler import cRequestHandler from resources.lib.parser import cParser from resources.lib.util import cUtil from resources.lib.comaddon", "remet en utf-8 # Réécriture de sHtmlContent pour prendre les liens et pour", "in sHosterUrl: sHosterUrl = sHosterUrl.split('&url=')[1] # pour récuperer le lien jwplayer(GoogleDrive) if 'filmhdstream'", "= '<a class=\"next page-numbers\" href=\"([^\"]+)\"' oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern) if", "MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1],", "= cGui() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb =", "de dpstreaming.tv SITE_IDENTIFIER = 'streamingk_com' SITE_NAME = 'StreamingK' SITE_DESC = 'Films, Séries &", "'category/films/drame/'] ) liste.append( ['Espionnage', URL_MAIN + 'category/films/espionnage/'] ) liste.append( ['Famille', URL_MAIN + 'category/films/famille/']", "aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): return aResult[1][0] return False def", "Séries & Mangas en streaming. Tout les meilleurs streaming en illimité.' 
URL_MAIN =", "liste.append( ['Historique', URL_MAIN + 'category/films/historique/'] ) liste.append( ['Horreur', URL_MAIN + 'category/films/horreur/'] ) liste.append(", "+ 'category/films/arts-martiaux/'] ) liste.append( ['Aventure', URL_MAIN + 'category/films/aventure-films/'] ) liste.append( ['Biopic', URL_MAIN +", "= aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster,", "sPattern) # Si il y a rien a afficher c'est peut etre une", "SERIE_VOSTFR[1], 'Séries (VOSTFR)', 'vostfr.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', REPLAYTV_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, REPLAYTV_NEWS[1], 'Emissions", "'<>') sHtmlContent = sHtmlContent.replace('listes-des-series-annulees-et-renouvelees', '<>') oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern) if", "SERIE_LIST[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_LIST[1], 'Séries (Liste)', 'listes.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VFS[0]) oGui.addDir(SITE_IDENTIFIER,", "import cJheberg import re, unicodedata # clone de dpstreaming.tv SITE_IDENTIFIER = 'streamingk_com' SITE_NAME", "sHtmlContent = sHtmlContent.replace('top-series-du-moment', '<>') sHtmlContent = sHtmlContent.replace('listes-des-series-annulees-et-renouvelees', '<>') oParser = cParser() aResult =", "'', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) if not sSearch: sNextPage = __checkForNextPage(sHtmlContent) if (sNextPage", "liste.append( ['Emissions TV', URL_MAIN + 'category/emissions-tv/'] ) for sTitle, sUrl in liste: oOutputParameterHandler", "progress_.VSupdate(progress_, total) if progress_.iscanceled(): break # Si recherche et trop de resultat, on", "in aResult[1]: if aEntry[0]: oGui.addText(SITE_IDENTIFIER, 
'[COLOR red]' + aEntry[0] + '[/COLOR]') else: sHosterUrl", "pour récuperer le lien jwplayer(GoogleDrive) if 'filmhdstream' in sHosterUrl: oRequestHandler = cRequestHandler(sHosterUrl) sHtmlContent", "'[COLOR red]' + aEntry[0] + '[/COLOR]') else: sHosterUrl = aEntry[1] # pour récuperer", "for aEntry in aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break if aEntry[0]: # stream", "re.search('a href=\"https*:\\/\\/([^.]+)', sUrl) if SXXEX: # on vire le double affichage des saisons", "False): oGui = cGui() oParser = cParser() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl')", "meilleurs streaming en illimité.' URL_MAIN = 'https://streamingk.net/' MOVIE_NEWS = (URL_MAIN + 'category/films/', 'showMovies')", "= cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VFS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VFS[1], 'Séries (VF)', 'vf.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler()", "resources.lib.gui.gui import cGui from resources.lib.handler.inputParameterHandler import cInputParameterHandler from resources.lib.handler.outputParameterHandler import cOutputParameterHandler from resources.lib.handler.requestHandler", "'' try: sPattern = '</p><p style=\"text-align: center;\">([^<]+)</p><p style=\"text-align: center;\">' aResult = oParser.parse(sHtmlContent, sPattern)", "+ 'category/series-tv/d-e-f/'] ) liste.append( ['G-H-I', URL_MAIN + 'category/series-tv/g-h-i/'] ) liste.append( ['J-K-L', URL_MAIN +", "serieHosters(): oGui = cGui() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')", "URL_MAIN + 'category/films/animation/'] ) liste.append( ['Arts Martiaux', URL_MAIN + 'category/films/arts-martiaux/'] ) liste.append( ['Aventure',", "liens et pour récuperer le dernier episode sHtmlContent = sHtmlContent.replace('<span 
style=\"color: #ff9900;\">New</span><b> </b>',", "in sUrl: oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) else: oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sDisplayTitle,", "URL_MAIN + 'category/films/western/'] ) liste.append( ['VOSTFR', URL_MAIN + 'category/films/vostfr-films/'] ) liste.append( ['BLURAY 1080p/720p',", "oParser.parse(sUrl, sPattern) if (aResult[0] == True): for aEntry in aResult[1]: sHosterUrl = aEntry", "'showMovies') URL_SEARCH_MOVIES = (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_SERIES = (URL_MAIN + '?s=', 'showMovies')", "+ 'category/films/aventure-films/'] ) liste.append( ['Biopic', URL_MAIN + 'category/films/biopic/'] ) liste.append( ['Comédie', URL_MAIN +", "'category/series-tv/', 'showMovies') SERIE_NEWS = (URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_LIST = (True, 'showList') SERIE_VFS", "re.sub('\\[\\w+]', '', sTitle) sTitle = re.sub('\\[\\w+ \\w+]', '', sTitle) sThumb = aEntry[1] if", "SERIE_VFS[1], 'Séries (VF)', 'vf.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFR[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VOSTFR[1], 'Séries", "'category/series-tv/s-t-u/'] ) liste.append( ['V-W-X-Y-Z', URL_MAIN + 'category/series-tv/v-w-x-y-z/'] ) for sTitle, sUrl in liste:", "'showGenres') SERIE_SERIES = (URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_NEWS = (URL_MAIN + 'category/series-tv/', 'showMovies')", "class=\"large button.+?\" href=\"([^<>\"]+?)\" target=\"(?:_blank|vid)\"' aResult = oParser.parse(sHtmlContent, sPattern) # Si il y a", "['D-E-F', URL_MAIN + 'category/series-tv/d-e-f/'] ) liste.append( ['G-H-I', URL_MAIN + 'category/series-tv/g-h-i/'] ) liste.append( ['J-K-L',", "return False def showSeries(sLoop = False): oGui = cGui() oParser = cParser() oInputParameterHandler", "SXXEX.group(1) if HOST: HOST = HOST.group(1).split('/')[0] sDisplayTitle = sTitle + ' [COLOR coral]'", 
"ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png',", "+ ' ' + SXXEX.group(1) if HOST: HOST = HOST.group(1).split('/')[0] sDisplayTitle = sTitle", "URL_MAIN + 'category/films/biopic/'] ) liste.append( ['Comédie', URL_MAIN + 'category/films/comedie/'] ) liste.append( ['Comédie Dramatique',", "def __checkForNextPage(sHtmlContent): sPattern = '<a class=\"next page-numbers\" href=\"([^\"]+)\"' oParser = cParser() aResult =", "sPattern) if aResult[0]: sDesc = aResult[1][0] sDesc = sDesc.replace('&#8217;', '\\'').replace('&#8230;', '...') except: pass", "sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) oGui.setEndOfDirectory() def showHosters(sLoop = False): oGui =", "['Comédie', URL_MAIN + 'category/films/comedie/'] ) liste.append( ['Comédie Dramatique', URL_MAIN + 'category/films/comedie-dramatique/'] ) liste.append(", "[COLOR coral]' + HOST.capitalize() + '[/COLOR]' else: sTitle = sMovieTitle + ' '", "+ sTitle + '[/COLOR]', 'az.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMovies(sSearch = ''): oGui =", "cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) else: oHoster", "oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) oGui.addMisc(SITE_IDENTIFIER, 'serieHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) oGui.setEndOfDirectory()", "les meilleurs streaming en illimité.' 
URL_MAIN = 'https://streamingk.net/' MOVIE_NEWS = (URL_MAIN + 'category/films/',", "= sHtmlContent.replace('quelle-est-votre-serie-preferee', '<>') sHtmlContent = sHtmlContent.replace('top-series-du-moment', '<>') sHtmlContent = sHtmlContent.replace('listes-des-series-annulees-et-renouvelees', '<>') oParser =", "== False): # oGui.setEndOfDirectory() showSeries(True) return if (aResult[0] == True): for aEntry in", "sDesc, oOutputParameterHandler) else: oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) if not", "URL_MAIN + 'category/documentaire/'] ) liste.append( ['Drame', URL_MAIN + 'category/films/drame/'] ) liste.append( ['Espionnage', URL_MAIN", ") liste.append( ['J-K-L', URL_MAIN + 'category/series-tv/j-k-l/'] ) liste.append( ['M-N-O', URL_MAIN + 'category/series-tv/m-n-o/'] )", "resources.lib.gui.hoster import cHosterGui from resources.lib.gui.gui import cGui from resources.lib.handler.inputParameterHandler import cInputParameterHandler from resources.lib.handler.outputParameterHandler", "oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Réécriture de", "if aResult[0]: sDesc = aResult[1][0] sDesc = sDesc.replace('&#8217;', '\\'').replace('&#8230;', '...') except: pass sPattern", "(aResult[0] == True): for aEntry in aResult[1]: if aEntry[0]: oGui.addText(SITE_IDENTIFIER, '[COLOR red]' +", "False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) else: oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster", "'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler)", "etre 
une serie if (len(aResult) == 0) and (sLoop == False): # oGui.setEndOfDirectory()", "sThumb) if '-filmographie-streaming' in aEntry[1]: pass elif 'quelle-est-votre-serie-preferee' in aEntry[1]: pass elif 'series'", "False): oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sNextPage) oGui.addNext(SITE_IDENTIFIER, 'showMovies', '[COLOR teal]Suivant >>>[/COLOR]', oOutputParameterHandler) oGui.setEndOfDirectory()", "HOST.group(1).split('/')[0] sDisplayTitle = sTitle + ' [COLOR coral]' + HOST.capitalize() + '[/COLOR]' else:", "= oRequestHandler.request() sPattern = '<iframe.+?src=\"([^\"]+)\"' aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True):", "'category/series-tv/series-streaming-vf/', 'showMovies') SERIE_VOSTFR = (URL_MAIN + 'category/series-tv/series-streaming-vostfr/', 'showMovies') REPLAYTV_NEWS = (URL_MAIN + 'category/emissions-tv/',", "if sSearch: sUrl = sSearch.replace(' ', '+') sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\"' else:", "'showHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) if not sSearch: sNextPage = __checkForNextPage(sHtmlContent)", "On remet en utf-8 # Réécriture de sHtmlContent pour prendre les liens et", "sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Lettres [COLOR coral]' + sTitle + '[/COLOR]', 'az.png', oOutputParameterHandler) oGui.setEndOfDirectory()", "== True): total = len(aResult[1]) progress_ = progress().VScreate(SITE_NAME) for aEntry in aResult[1]: progress_.VSupdate(progress_,", "+ HOST.capitalize() + '[/COLOR]' else: sTitle = sMovieTitle + ' ' + aEntry[1].replace('", ") liste.append( ['BLURAY 3D', URL_MAIN + 'category/films/bluray-3d/'] ) liste.append( ['Emissions TV', URL_MAIN +", "'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) else: oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sDisplayTitle, '', 
sThumb, sDesc,", "oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Lettres [COLOR coral]' + sTitle + '[/COLOR]', 'az.png', oOutputParameterHandler)", "'\\'').replace('&#8217;', '\\'').replace('&#8230;', '...') oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl1) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) if", "oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VFS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VFS[1], 'Séries (VF)', 'vf.png', oOutputParameterHandler) oOutputParameterHandler", "d'episode unique # if (aResult[0] == False) and (sLoop == False): # #oGui.setEndOfDirectory()", "sTitle + ' [COLOR coral]' + HOST.capitalize() + '[/COLOR]' else: sTitle = sMovieTitle", "return if (aResult[0] == True): for aEntry in aResult[1]: if aEntry[0]: oGui.addText(SITE_IDENTIFIER, '[COLOR", "2)[1] sTitle = sTitle.replace('-streaming-telecharger', '').replace('-', ' ') sTitle = sTitle.replace(' [Streaming]', '') sTitle", "'?s=', 'showMovies') URL_SEARCH_MOVIES = (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_SERIES = (URL_MAIN + '?s=',", "unicodedata # clone de dpstreaming.tv SITE_IDENTIFIER = 'streamingk_com' SITE_NAME = 'StreamingK' SITE_DESC =", "oGui = cGui() liste = [] liste.append( ['0-9', URL_MAIN + 'category/series-tv/0-9/'] ) liste.append(", "= cParser() aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): return aResult[1][0] return", "oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_LIST[0]) oGui.addDir(SITE_IDENTIFIER,", "in sUrl1 or re.match('.+?saison [0-9]+', sTitle, re.IGNORECASE): oGui.addTV(SITE_IDENTIFIER, 'showSeries', 
sDisplayTitle, '', sThumb, sDesc,", "= oParser.parse(sHtmlContent, sPattern) # Si il y a rien a afficher c'est peut", "liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Lettres [COLOR coral]' + sTitle", "sTitle.replace(' [Telecharger]', '').replace(' [Telechargement]', '') sDisplayTitle = sTitle # on retire la qualité", "'category/films/historique/'] ) liste.append( ['Horreur', URL_MAIN + 'category/films/horreur/'] ) liste.append( ['Musical', URL_MAIN + 'category/films/musical/']", "illimité.' URL_MAIN = 'https://streamingk.net/' MOVIE_NEWS = (URL_MAIN + 'category/films/', 'showMovies') MOVIE_MOVIE = (URL_MAIN", "'...') oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl1) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) if '-filmographie-streaming' in", "progress_.iscanceled(): break if aEntry[0]: # stream ou telechargement oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0]", "'<a class=\"next page-numbers\" href=\"([^\"]+)\"' oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0]", "oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', 'http://venom/') oGui.addDir(SITE_IDENTIFIER, 'showMoviesSearch', 'Recherche', 'search.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler()", "aEntry in aResult[1]: sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False):", "liste.append( ['Aventure', URL_MAIN + 'category/films/aventure-films/'] ) liste.append( ['Biopic', URL_MAIN + 'category/films/biopic/'] ) liste.append(", "liste.append( ['BLURAY 1080p/720p', URL_MAIN + 'category/films/bluray-1080p-720p/'] ) liste.append( ['BLURAY 3D', URL_MAIN + 'category/films/bluray-3d/']", "progress_.VSupdate(progress_, total) if 
progress_.iscanceled(): break if aEntry[0]: # stream ou telechargement oGui.addText(SITE_IDENTIFIER, '[COLOR", "= (URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_LIST = (True, 'showList') SERIE_VFS = (URL_MAIN +", "cRequestHandler(sHosterUrl) sHtmlContent = oRequestHandler.request() sPattern = '<iframe.+?src=\"([^\"]+)\"' aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0]", "oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) oGui.setEndOfDirectory() def serieHosters(): oGui = cGui() oInputParameterHandler", "liste.append( ['Comédie Dramatique', URL_MAIN + 'category/films/comedie-dramatique/'] ) liste.append( ['Documentaire', URL_MAIN + 'category/documentaire/'] )", "sPattern = '<strong><span style=\"color: #ff9900;\">([^<]+)<|<a class=\"large button.+?\" href=\"([^<>\"]+?)\" target=\"(?:_blank|vid)\"' aResult = oParser.parse(sHtmlContent, sPattern)", "'Saison') if 'Brouillon' in sTitle: sTitle = sUrl1.rsplit('/', 2)[1] sTitle = sTitle.replace('-streaming-telecharger', '').replace('-',", "else: sHosterUrl = aEntry[1] # pour récuperer tous les liens if '&url=' in", "https://github.com/Kodi-vStream/venom-xbmc-addons from resources.lib.gui.hoster import cHosterGui from resources.lib.gui.gui import cGui from resources.lib.handler.inputParameterHandler import cInputParameterHandler", "['Arts Martiaux', URL_MAIN + 'category/films/arts-martiaux/'] ) liste.append( ['Aventure', URL_MAIN + 'category/films/aventure-films/'] ) liste.append(", "'', sMovieTitle) + ' ' + SXXEX.group(1) if HOST: HOST = HOST.group(1).split('/')[0] sDisplayTitle", "'') sTitle = sTitle.replace(' [Telecharger]', '').replace(' [Telechargement]', '') sDisplayTitle = sTitle # on", "= (URL_MAIN + 'category/series-tv/series-streaming-vostfr/', 'showMovies') REPLAYTV_NEWS = (URL_MAIN + 'category/emissions-tv/', 'showMovies') REPLAYTV_REPLAYTV =", "= sSearch.replace(' ', '+') sPattern = '<div 
class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\"' else: oInputParameterHandler = cInputParameterHandler()", "= sTitle + ' [COLOR coral]' + HOST.capitalize() + '[/COLOR]' else: sTitle =", "import cUtil from resources.lib.comaddon import progress #, VSlog from resources.lib.multihost import cJheberg import", "oInputParameterHandler.getValue('siteUrl') sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\".+?<p>([^<]+)</p>' oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() #", "URL_MAIN + 'category/films/bluray-1080p-720p/'] ) liste.append( ['BLURAY 3D', URL_MAIN + 'category/films/bluray-3d/'] ) liste.append( ['Emissions", "+ 'category/films/action/'] ) liste.append( ['Animation', URL_MAIN + 'category/films/animation/'] ) liste.append( ['Arts Martiaux', URL_MAIN", "len(aResult[1]) progress_ = progress().VScreate(SITE_NAME) for aEntry in aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break", "'Séries (VF)', 'vf.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFR[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VOSTFR[1], 'Séries (VOSTFR)',", "HOST = HOST.group(1).split('/')[0] sDisplayTitle = sTitle + ' [COLOR coral]' + HOST.capitalize() +", "oOutputParameterHandler) progress_.VSclose(progress_) oGui.setEndOfDirectory() def showHosters(sLoop = False): oGui = cGui() oInputParameterHandler = cInputParameterHandler()", "des saisons sTitle = re.sub(' - Saison \\d+', '', sMovieTitle) + ' '", "SERIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_LIST[0])", "ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler = 
cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_LIST[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_LIST[1], 'Séries (Liste)', 'listes.png',", "= oParser.parse(sHtmlContent, sPattern) # astuce en cas d'episode unique # if (aResult[0] ==", "+ aEntry[0] + '[/COLOR]') else: sHosterUrl = aEntry[1] # pour récuperer tous les", "sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) if not sSearch: sNextPage = __checkForNextPage(sHtmlContent) if", "= sHtmlContent.replace('listes-des-series-annulees-et-renouvelees', '<>') oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] ==", "'category/films/musical/'] ) liste.append( ['Policier', URL_MAIN + 'category/films/policier/'] ) liste.append( ['Romance', URL_MAIN + 'category/films/romance/']", "# on retire la qualité sTitle = re.sub('\\[\\w+]', '', sTitle) sTitle = re.sub('\\[\\w+", "= (URL_MAIN + 'category/films/', 'showMovies') MOVIE_MOVIE = (URL_MAIN + 'category/films/', 'showMovies') MOVIE_VOSTFR =", "in aResult[1]: sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle)", "= __checkForNextPage(sHtmlContent) if (sNextPage != False): oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sNextPage) oGui.addNext(SITE_IDENTIFIER, 'showMovies',", "oOutputParameterHandler) elif 'mangas' in sUrl: oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) else:", "pour récuperer tous les liens if '&url=' in sHosterUrl: sHosterUrl = sHosterUrl.split('&url=')[1] #", "(Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)',", "= cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Réécriture de 
sHtmlContent pour récuperer la qualité", "oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Lettres [COLOR coral]' + sTitle + '[/COLOR]', 'az.png', oOutputParameterHandler) oGui.setEndOfDirectory() def", "+ sSearchText showMovies(sUrl) oGui.setEndOfDirectory() return def showGenres(): oGui = cGui() liste = []", "SITE_DESC = 'Films, Séries & Mangas en streaming. Tout les meilleurs streaming en", "'category/films/horreur/'] ) liste.append( ['Musical', URL_MAIN + 'category/films/musical/'] ) liste.append( ['Policier', URL_MAIN + 'category/films/policier/']", "streaming en illimité.' URL_MAIN = 'https://streamingk.net/' MOVIE_NEWS = (URL_MAIN + 'category/films/', 'showMovies') MOVIE_MOVIE", "cGui() liste = [] liste.append( ['0-9', URL_MAIN + 'category/series-tv/0-9/'] ) liste.append( ['A-B-C', URL_MAIN", "= progress().VScreate(SITE_NAME) for aEntry in aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break # Si", "'vf.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFR[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VOSTFR[1], 'Séries (VOSTFR)', 'vostfr.png', oOutputParameterHandler)", "oGui.setEndOfDirectory() showSeries(True) return if (aResult[0] == True): for aEntry in aResult[1]: if aEntry[0]:", "+ 'category/films/vostfr-films/'] ) liste.append( ['BLURAY 1080p/720p', URL_MAIN + 'category/films/bluray-1080p-720p/'] ) liste.append( ['BLURAY 3D',", "'filmhdstream' in sHosterUrl: oRequestHandler = cRequestHandler(sHosterUrl) sHtmlContent = oRequestHandler.request() sPattern = '<iframe.+?src=\"([^\"]+)\"' aResult", "URL_MAIN + 'category/films/romance/'] ) liste.append( ['Science-Fiction', URL_MAIN + 'category/films/science-fiction/'] ) liste.append( ['Spectacle', URL_MAIN", "'category/films/thriller/'] ) liste.append( ['Western', URL_MAIN + 'category/films/western/'] ) liste.append( ['VOSTFR', URL_MAIN + 'category/films/vostfr-films/']", "pour récuperer les liens 
jheberg elif 'jheberg' in sHosterUrl: aResult = cJheberg().GetUrls(sHosterUrl) if", "in sHosterUrl: oRequestHandler = cRequestHandler(sHosterUrl) sHtmlContent = oRequestHandler.request() sPattern = '<iframe.+?src=\"([^\"]+)\"' aResult =", "sHosterUrl: aResult = cJheberg().GetUrls(sHosterUrl) if aResult: for aEntry in aResult: sHosterUrl = aEntry", "liste.append( ['G-H-I', URL_MAIN + 'category/series-tv/g-h-i/'] ) liste.append( ['J-K-L', URL_MAIN + 'category/series-tv/j-k-l/'] ) liste.append(", "stream ou telechargement oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + '[/COLOR]') else: # Saisons", "['V-W-X-Y-Z', URL_MAIN + 'category/series-tv/v-w-x-y-z/'] ) for sTitle, sUrl in liste: oOutputParameterHandler = cOutputParameterHandler()", "= cParser() aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == False): oGui.addText(SITE_IDENTIFIER) if (aResult[0]", "'https://streamingk.net/' MOVIE_NEWS = (URL_MAIN + 'category/films/', 'showMovies') MOVIE_MOVIE = (URL_MAIN + 'category/films/', 'showMovies')", "+ 'category/films/horreur/'] ) liste.append( ['Musical', URL_MAIN + 'category/films/musical/'] ) liste.append( ['Policier', URL_MAIN +", "URL_MAIN + 'category/films/espionnage/'] ) liste.append( ['Famille', URL_MAIN + 'category/films/famille/'] ) liste.append( ['Fantastique', URL_MAIN", "(URL_MAIN + 'category/emissions-tv/', 'showMovies') URL_SEARCH = (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_MOVIES = (URL_MAIN", "il y a rien a afficher c'est peut etre une serie if (len(aResult)", "= oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') sPattern = 'href=\"([^\"]+)\"' oParser = cParser() aResult =", "' + SXXEX.group(1) if HOST: HOST = HOST.group(1).split('/')[0] sDisplayTitle = sTitle + '", "center;\">' aResult = oParser.parse(sHtmlContent, sPattern) if aResult[0]: sDesc = aResult[1][0] sDesc = sDesc.replace('&#8217;',", "en cas d'episode unique # if (aResult[0] == False) and (sLoop == False):", 
"showMovies(sUrl) oGui.setEndOfDirectory() return def showGenres(): oGui = cGui() liste = [] liste.append( ['Action',", ") liste.append( ['Musical', URL_MAIN + 'category/films/musical/'] ) liste.append( ['Policier', URL_MAIN + 'category/films/policier/'] )", "(True, 'showList') SERIE_VFS = (URL_MAIN + 'category/series-tv/series-streaming-vf/', 'showMovies') SERIE_VOSTFR = (URL_MAIN + 'category/series-tv/series-streaming-vostfr/',", "sThumb, sDesc, oOutputParameterHandler) elif 'mangas' in sUrl: oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb, sDesc,", "' ') sTitle = sTitle.replace(' [Streaming]', '') sTitle = sTitle.replace(' [Telecharger]', '').replace(' [Telechargement]',", "'<strong><span style=\"color: #ff9900;\">([^<]+)<|<a class=\"large button.+?\" href=\"([^<>\"]+?)\" target=\"(?:_blank|vid)\"' aResult = oParser.parse(sHtmlContent, sPattern) # Si", "False): sUrl = URL_SEARCH[0] + sSearchText showMovies(sUrl) oGui.setEndOfDirectory() return def showGenres(): oGui =", "'category/documentaire/'] ) liste.append( ['Drame', URL_MAIN + 'category/films/drame/'] ) liste.append( ['Espionnage', URL_MAIN + 'category/films/espionnage/']", "= oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Réécriture", "red]' + aEntry[0] + '[/COLOR]') else: # Saisons et episodes sUrl = aEntry[2]", "'showMovies') URL_SEARCH = (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_MOVIES = (URL_MAIN + '?s=', 'showMovies')", "progress_.iscanceled(): break # Si recherche et trop de resultat, on nettoye if sSearch", "= '</p><p style=\"text-align: center;\">([^<]+)</p><p style=\"text-align: center;\">' aResult = oParser.parse(sHtmlContent, sPattern) if aResult[0]: sDesc", "== False) and (sLoop == False): # #oGui.setEndOfDirectory() # serieHosters(True) # return if", "SERIE_VOSTFR[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VOSTFR[1], 'Séries (VOSTFR)', 
'vostfr.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', REPLAYTV_NEWS[0]) oGui.addDir(SITE_IDENTIFIER,", "'ignore').decode('unicode_escape') # vire accent et '\\' sHtmlContent = sHtmlContent.encode('utf-8') # On remet en", "oParser.parse(sHtmlContent, sPattern) if aResult[0]: sDesc = aResult[1][0] sDesc = sDesc.replace('&#8217;', '\\'').replace('&#8230;', '...') except:", "sHtmlContent.replace('<span style=\"color: #ff9900;\"><strong>', '<strong><span style=\"color: #ff9900;\">') oParser = cParser() sPattern = '<strong><span style=\"color:", "cJheberg().GetUrls(sHosterUrl) if aResult: for aEntry in aResult: sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl)", "aEntry[3].replace('[&hellip;]', '').replace('&hellip;', '...').replace('&rsquo;', '\\'').replace('&#8217;', '\\'').replace('&#8230;', '...') oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl1) oOutputParameterHandler.addParameter('sMovieTitle', sTitle)", ") liste.append( ['Espionnage', URL_MAIN + 'category/films/espionnage/'] ) liste.append( ['Famille', URL_MAIN + 'category/films/famille/'] )", "['Horreur', URL_MAIN + 'category/films/horreur/'] ) liste.append( ['Musical', URL_MAIN + 'category/films/musical/'] ) liste.append( ['Policier',", "pass sPattern = '<span style=\"color: #33cccc;[^<>\"]*\">(?:<(?:strong|b)>)((?:Stream|Telec)[^<>]+)|\"center\">(.pisode[^<]{2,12})*<(?!\\/a>)([^<>]*a href=\"http.+?)(?:<.p>|<br|<.div)' aResult = oParser.parse(sHtmlContent, sPattern) # astuce", "Dramatique', URL_MAIN + 'category/films/comedie-dramatique/'] ) liste.append( ['Documentaire', URL_MAIN + 'category/documentaire/'] ) liste.append( ['Drame',", "oGui.addMisc(SITE_IDENTIFIER, 'serieHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) oGui.setEndOfDirectory() def showHosters(sLoop = False):", ") liste.append( ['Horreur', 
URL_MAIN + 'category/films/horreur/'] ) liste.append( ['Musical', URL_MAIN + 'category/films/musical/'] )", "oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_LIST[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_LIST[1], 'Séries (Liste)', 'listes.png', oOutputParameterHandler) oOutputParameterHandler", "'\\' sHtmlContent = sHtmlContent.encode('utf-8') # On remet en utf-8 # Réécriture de sHtmlContent", "sSearchText = oGui.showKeyBoard() if (sSearchText != False): sUrl = URL_SEARCH[0] + sSearchText showMovies(sUrl)", "['Spectacle', URL_MAIN + 'category/films/spectacle/'] ) liste.append( ['Thriller', URL_MAIN + 'category/films/thriller/'] ) liste.append( ['Western',", "sTitle, re.IGNORECASE): oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) elif 'mangas' in sUrl:", "(aResult[0] == False): oGui.addText(SITE_IDENTIFIER) if (aResult[0] == True): total = len(aResult[1]) progress_ =", "sThumb = aEntry[1] if sSearch: sDesc = '' else: sDesc = aEntry[3].replace('[&hellip;]', '').replace('&hellip;',", "oParser.parse(sHtmlContent, sPattern) # Si il y a rien a afficher c'est peut etre", "'category/series-tv/series-streaming-vostfr/', 'showMovies') REPLAYTV_NEWS = (URL_MAIN + 'category/emissions-tv/', 'showMovies') REPLAYTV_REPLAYTV = (URL_MAIN + 'category/emissions-tv/',", "style=\"color: #33cccc;[^<>\"]*\">(?:<(?:strong|b)>)((?:Stream|Telec)[^<>]+)|\"center\">(.pisode[^<]{2,12})*<(?!\\/a>)([^<>]*a href=\"http.+?)(?:<.p>|<br|<.div)' aResult = oParser.parse(sHtmlContent, sPattern) # astuce en cas d'episode unique", "aEntry in aResult[1]: sHosterUrl = aEntry # pour récuperer tous les liens if", "cGui() liste = [] liste.append( ['Action', URL_MAIN + 'category/films/action/'] ) liste.append( ['Animation', URL_MAIN", "MOVIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler 
= cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0])", "aEntry[1].replace(' New', '') sDisplayTitle = sTitle oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oOutputParameterHandler.addParameter('sMovieTitle', sTitle)", "'streamingk_com' SITE_NAME = 'StreamingK' SITE_DESC = 'Films, Séries & Mangas en streaming. Tout", "sDisplayTitle = sTitle + ' [COLOR coral]' + HOST.capitalize() + '[/COLOR]' else: sTitle", "class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\"' else: oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sPattern = '<div class=\"post-thumbnail\".+?<a", "'category/series-tv/a-b-c/'] ) liste.append( ['D-E-F', URL_MAIN + 'category/series-tv/d-e-f/'] ) liste.append( ['G-H-I', URL_MAIN + 'category/series-tv/g-h-i/']", "'category/series-tv/v-w-x-y-z/'] ) for sTitle, sUrl in liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER,", "sThumb, sDesc, oOutputParameterHandler) else: oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) if", "aResult = oParser.parse(sHtmlContent, sPattern) # astuce en cas d'episode unique # if (aResult[0]", "['VOSTFR', URL_MAIN + 'category/films/vostfr-films/'] ) liste.append( ['BLURAY 1080p/720p', URL_MAIN + 'category/films/bluray-1080p-720p/'] ) liste.append(", "cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl1) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) if '-filmographie-streaming' in aEntry[1]: pass elif", "cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 
'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler()", "'category/series-tv/j-k-l/'] ) liste.append( ['M-N-O', URL_MAIN + 'category/series-tv/m-n-o/'] ) liste.append( ['P-Q-R', URL_MAIN + 'category/series-tv/p-q-r/']", "liste.append( ['Comédie', URL_MAIN + 'category/films/comedie/'] ) liste.append( ['Comédie Dramatique', URL_MAIN + 'category/films/comedie-dramatique/'] )", "(aResult[0] == False) and (sLoop == False): # #oGui.setEndOfDirectory() # serieHosters(True) # return", "oRequestHandler.request() sHtmlContent = sHtmlContent.decode('utf-8', \"replace\") sHtmlContent = unicodedata.normalize('NFD', sHtmlContent).encode('ascii', 'ignore').decode('unicode_escape') # vire accent", "cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl)", "sUrl) if SXXEX: # on vire le double affichage des saisons sTitle =", "# Si recherche et trop de resultat, on nettoye if sSearch and total", "oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): return aResult[1][0] return False def showSeries(sLoop =", "episodes sUrl = aEntry[2] SXXEX = re.search('>(S[0-9]{2}E[0-9]{2})<', sUrl) HOST = re.search('a href=\"https*:\\/\\/([^.]+)', sUrl)", "in aEntry[1]: pass elif 'series' in sUrl1 or re.match('.+?saison [0-9]+', sTitle, re.IGNORECASE): oGui.addTV(SITE_IDENTIFIER,", "telechargement oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + '[/COLOR]') else: # Saisons et episodes", "sUrl in liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler)", "oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() sHtmlContent = sHtmlContent.decode('utf-8', \"replace\") 
sHtmlContent =", "liste.append( ['Thriller', URL_MAIN + 'category/films/thriller/'] ) liste.append( ['Western', URL_MAIN + 'category/films/western/'] ) liste.append(", "= '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\".+?<p>([^<]+)</p>' oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Magouille pour", "['Romance', URL_MAIN + 'category/films/romance/'] ) liste.append( ['Science-Fiction', URL_MAIN + 'category/films/science-fiction/'] ) liste.append( ['Spectacle',", "aResult = oParser.parse(sUrl, sPattern) if (aResult[0] == True): for aEntry in aResult[1]: sHosterUrl", "FUNCTION_SEARCH = 'showMovies' def load(): oGui = cGui() oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', 'http://venom/')", "oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFR[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VOSTFR[1], 'Séries (VOSTFR)', 'vostfr.png', oOutputParameterHandler) oOutputParameterHandler =", "coding: utf-8 -*- # Vstream https://github.com/Kodi-vStream/venom-xbmc-addons from resources.lib.gui.hoster import cHosterGui from resources.lib.gui.gui import", "<gh_stars>1-10 # -*- coding: utf-8 -*- # Vstream https://github.com/Kodi-vStream/venom-xbmc-addons from resources.lib.gui.hoster import cHosterGui", "False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) oGui.setEndOfDirectory() def serieHosters(): oGui = cGui()", "for aEntry in aResult[1]: if aEntry[0]: oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + '[/COLOR]')", "en illimité.' 
URL_MAIN = 'https://streamingk.net/' MOVIE_NEWS = (URL_MAIN + 'category/films/', 'showMovies') MOVIE_MOVIE =", "sTitle: sTitle = sUrl1.rsplit('/', 2)[1] sTitle = sTitle.replace('-streaming-telecharger', '').replace('-', ' ') sTitle =", "elif 'series' in sUrl1 or re.match('.+?saison [0-9]+', sTitle, re.IGNORECASE): oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '',", "+ 'category/films/bluray-3d/'] ) liste.append( ['Emissions TV', URL_MAIN + 'category/emissions-tv/'] ) for sTitle, sUrl", "#, VSlog from resources.lib.multihost import cJheberg import re, unicodedata # clone de dpstreaming.tv", "sPattern) if (aResult[0] == False): oGui.addText(SITE_IDENTIFIER) if (aResult[0] == True): total = len(aResult[1])", "progress_ = progress().VScreate(SITE_NAME) for aEntry in aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break #", "cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') sPattern = 'href=\"([^\"]+)\"'", "SERIE_LIST[1], 'Séries (Liste)', 'listes.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VFS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VFS[1], 'Séries", "= len(aResult[1]) progress_ = progress().VScreate(SITE_NAME) for aEntry in aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled():", "2: if cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0], ''), aEntry[2]) == 0: continue sUrl1 = aEntry[0] sTitle =", "liste.append( ['Fantastique', URL_MAIN + 'category/films/fantastique/'] ) liste.append( ['Guerre', URL_MAIN + 'category/films/guerre/'] ) liste.append(", "'category/films/science-fiction/'] ) liste.append( ['Spectacle', URL_MAIN + 'category/films/spectacle/'] ) liste.append( ['Thriller', URL_MAIN + 'category/films/thriller/']", "sTitle) oOutputParameterHandler.addParameter('sThumb', 
sThumb) if '-filmographie-streaming' in aEntry[1]: pass elif 'quelle-est-votre-serie-preferee' in aEntry[1]: pass", "def showMovies(sSearch = ''): oGui = cGui() if sSearch: sUrl = sSearch.replace(' ',", "URL_MAIN = 'https://streamingk.net/' MOVIE_NEWS = (URL_MAIN + 'category/films/', 'showMovies') MOVIE_MOVIE = (URL_MAIN +", "= aEntry[3].replace('[&hellip;]', '').replace('&hellip;', '...').replace('&rsquo;', '\\'').replace('&#8217;', '\\'').replace('&#8230;', '...') oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl1) oOutputParameterHandler.addParameter('sMovieTitle',", "not sSearch: sNextPage = __checkForNextPage(sHtmlContent) if (sNextPage != False): oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl',", "sTitle = sTitle.replace(' [Streaming]', '') sTitle = sTitle.replace(' [Telecharger]', '').replace(' [Telechargement]', '') sDisplayTitle", "sPattern = 'href=\"([^\"]+)\"' oParser = cParser() aResult = oParser.parse(sUrl, sPattern) if (aResult[0] ==", "MOVIE_MOVIE = (URL_MAIN + 'category/films/', 'showMovies') MOVIE_VOSTFR = (URL_MAIN + 'category/films/vostfr-films/', 'showMovies') MOVIE_GENRES", "on nettoye if sSearch and total > 2: if cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0], ''), aEntry[2]) ==", "sUrl = sSearch.replace(' ', '+') sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\"' else: oInputParameterHandler =", "oOutputParameterHandler.addParameter('sThumb', sThumb) if '-filmographie-streaming' in aEntry[1]: pass elif 'quelle-est-votre-serie-preferee' in aEntry[1]: pass elif", "sTitle = re.sub('\\[\\w+ \\w+]', '', sTitle) sThumb = aEntry[1] if sSearch: sDesc =", "+ 'category/series-tv/p-q-r/'] ) liste.append( ['S-T-U', URL_MAIN + 'category/series-tv/s-t-u/'] ) liste.append( ['V-W-X-Y-Z', URL_MAIN +", "oOutputParameterHandler) else: oGui.addMovie(SITE_IDENTIFIER, 
'showHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) if not sSearch:", "cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) else: oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle)", "Réécriture de sHtmlContent pour récuperer la qualité sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\"><strong>', '<strong><span", "oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() sHtmlContent = sHtmlContent.decode('utf-8',", "pour virer les 3 ligne en trop en cas de recherche sHtmlContent =", "liste.append( ['Action', URL_MAIN + 'category/films/action/'] ) liste.append( ['Animation', URL_MAIN + 'category/films/animation/'] ) liste.append(", "aResult: for aEntry in aResult: sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster", "True): return aResult[1][0] return False def showSeries(sLoop = False): oGui = cGui() oParser", "= cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) else:", "REPLAYTV_REPLAYTV = (URL_MAIN + 'category/emissions-tv/', 'showMovies') URL_SEARCH = (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_MOVIES", "+ 'category/films/science-fiction/'] ) liste.append( ['Spectacle', URL_MAIN + 'category/films/spectacle/'] ) liste.append( ['Thriller', URL_MAIN +", "progress_.VSclose(progress_) if not sSearch: sNextPage = __checkForNextPage(sHtmlContent) if (sNextPage != False): oOutputParameterHandler =", "+ ' ' + aEntry[1].replace(' New', '') sDisplayTitle = sTitle oOutputParameterHandler = cOutputParameterHandler()", "Synopsis sDesc = '' try: sPattern = '</p><p style=\"text-align: center;\">([^<]+)</p><p style=\"text-align: center;\">' aResult", ">>>[/COLOR]', 
oOutputParameterHandler) oGui.setEndOfDirectory() def __checkForNextPage(sHtmlContent): sPattern = '<a class=\"next page-numbers\" href=\"([^\"]+)\"' oParser =", "oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1],", "and (sLoop == False): # oGui.setEndOfDirectory() showSeries(True) return if (aResult[0] == True): for", "+ 'category/films/romance/'] ) liste.append( ['Science-Fiction', URL_MAIN + 'category/films/science-fiction/'] ) liste.append( ['Spectacle', URL_MAIN +", "aResult = oParser.parse(sHtmlContent, sPattern) # Si il y a rien a afficher c'est", "liens jheberg elif 'jheberg' in sHosterUrl: aResult = cJheberg().GetUrls(sHosterUrl) if aResult: for aEntry", "if (sSearchText != False): sUrl = URL_SEARCH[0] + sSearchText showMovies(sUrl) oGui.setEndOfDirectory() return def", ") liste.append( ['Emissions TV', URL_MAIN + 'category/emissions-tv/'] ) for sTitle, sUrl in liste:", "oParser.parse(sHtmlContent, sPattern) # astuce en cas d'episode unique # if (aResult[0] == False)", "sPattern = '<span style=\"color: #33cccc;[^<>\"]*\">(?:<(?:strong|b)>)((?:Stream|Telec)[^<>]+)|\"center\">(.pisode[^<]{2,12})*<(?!\\/a>)([^<>]*a href=\"http.+?)(?:<.p>|<br|<.div)' aResult = oParser.parse(sHtmlContent, sPattern) # astuce en", "sPattern) # astuce en cas d'episode unique # if (aResult[0] == False) and", "aEntry in aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break if aEntry[0]: # stream ou", ") liste.append( ['Fantastique', URL_MAIN + 'category/films/fantastique/'] ) liste.append( ['Guerre', URL_MAIN + 'category/films/guerre/'] )", "import cHosterGui from resources.lib.gui.gui import cGui from resources.lib.handler.inputParameterHandler import cInputParameterHandler from resources.lib.handler.outputParameterHandler import", "'<div 
class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\".+?<p>([^<]+)</p>' oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Magouille pour virer", "['Animation', URL_MAIN + 'category/films/animation/'] ) liste.append( ['Arts Martiaux', URL_MAIN + 'category/films/arts-martiaux/'] ) liste.append(", "REPLAYTV_NEWS = (URL_MAIN + 'category/emissions-tv/', 'showMovies') REPLAYTV_REPLAYTV = (URL_MAIN + 'category/emissions-tv/', 'showMovies') URL_SEARCH", "sHtmlContent.replace('top-series-du-moment', '<>') sHtmlContent = sHtmlContent.replace('listes-des-series-annulees-et-renouvelees', '<>') oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern)", "sTitle = sUrl1.rsplit('/', 2)[1] sTitle = sTitle.replace('-streaming-telecharger', '').replace('-', ' ') sTitle = sTitle.replace('", "'', sTitle) sThumb = aEntry[1] if sSearch: sDesc = '' else: sDesc =", "sTitle = aEntry[2].replace('Saiosn', 'Saison') if 'Brouillon' in sTitle: sTitle = sUrl1.rsplit('/', 2)[1] sTitle", "') sHtmlContent = sHtmlContent.replace('<b></b>', ' ') sHtmlContent = sHtmlContent.replace('<span class=\"su-lightbox\" data-mfp-src', '<a href')", "href=\"http.+?)(?:<.p>|<br|<.div)' aResult = oParser.parse(sHtmlContent, sPattern) # astuce en cas d'episode unique # if", "'', sThumb, sDesc, oOutputParameterHandler) elif 'mangas' in sUrl: oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb,", "sDesc = '' try: sPattern = '</p><p style=\"text-align: center;\">([^<]+)</p><p style=\"text-align: center;\">' aResult =", "= sDesc.replace('&#8217;', '\\'').replace('&#8230;', '...') except: pass sPattern = '<span style=\"color: #33cccc;[^<>\"]*\">(?:<(?:strong|b)>)((?:Stream|Telec)[^<>]+)|\"center\">(.pisode[^<]{2,12})*<(?!\\/a>)([^<>]*a href=\"http.+?)(?:<.p>|<br|<.div)' aResult", "rien a afficher c'est peut etre une serie if (len(aResult) == 0) and", "oOutputParameterHandler) oOutputParameterHandler = 
cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler)", "sHtmlContent = sHtmlContent.encode('utf-8') # On remet en utf-8 # Réécriture de sHtmlContent pour", "oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() sHtmlContent = sHtmlContent.decode('utf-8', \"replace\") sHtmlContent = unicodedata.normalize('NFD',", "aEntry[0]: oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + '[/COLOR]') else: sHosterUrl = aEntry[1] #", "cas d'episode unique # if (aResult[0] == False) and (sLoop == False): #", "style=\"color: #ff9900;\">') oParser = cParser() sPattern = '<strong><span style=\"color: #ff9900;\">([^<]+)<|<a class=\"large button.+?\" href=\"([^<>\"]+?)\"", "Réécriture de sHtmlContent pour prendre les liens et pour récuperer le dernier episode", "Si il y a rien a afficher c'est peut etre une serie if", "ou telechargement oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + '[/COLOR]') else: # Saisons et", "aResult[1][0] sDesc = sDesc.replace('&#8217;', '\\'').replace('&#8230;', '...') except: pass sPattern = '<span style=\"color: #33cccc;[^<>\"]*\">(?:<(?:strong|b)>)((?:Stream|Telec)[^<>]+)|\"center\">(.pisode[^<]{2,12})*<(?!\\/a>)([^<>]*a", "sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent", "!= False): sUrl = URL_SEARCH[0] + sSearchText showMovies(sUrl) oGui.setEndOfDirectory() return def showGenres(): oGui", "= (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_SERIES = (URL_MAIN + '?s=', 'showMovies') FUNCTION_SEARCH =", "cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sNextPage) oGui.addNext(SITE_IDENTIFIER, 'showMovies', '[COLOR teal]Suivant >>>[/COLOR]', oOutputParameterHandler) 
oGui.setEndOfDirectory() def __checkForNextPage(sHtmlContent): sPattern", "# récupération du Synopsis sDesc = '' try: sPattern = '</p><p style=\"text-align: center;\">([^<]+)</p><p", "'category/films/policier/'] ) liste.append( ['Romance', URL_MAIN + 'category/films/romance/'] ) liste.append( ['Science-Fiction', URL_MAIN + 'category/films/science-fiction/']", "'category/films/romance/'] ) liste.append( ['Science-Fiction', URL_MAIN + 'category/films/science-fiction/'] ) liste.append( ['Spectacle', URL_MAIN + 'category/films/spectacle/']", "sTitle oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) oGui.addMisc(SITE_IDENTIFIER, 'serieHosters', sDisplayTitle,", "sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') sPattern = 'href=\"([^\"]+)\"' oParser", "[Streaming]', '') sTitle = sTitle.replace(' [Telecharger]', '').replace(' [Telechargement]', '') sDisplayTitle = sTitle #", "= oParser.parse(sHtmlContent, sPattern) if (aResult[0] == False): oGui.addText(SITE_IDENTIFIER) if (aResult[0] == True): total", "oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) if '-filmographie-streaming' in aEntry[1]: pass elif 'quelle-est-votre-serie-preferee' in aEntry[1]:", "sThumb) else: oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster,", "liste.append( ['Guerre', URL_MAIN + 'category/films/guerre/'] ) liste.append( ['Historique', URL_MAIN + 'category/films/historique/'] ) liste.append(", "virer les 3 ligne en trop en cas de recherche sHtmlContent = sHtmlContent.replace('quelle-est-votre-serie-preferee',", "+ 
'category/films/comedie/'] ) liste.append( ['Comédie Dramatique', URL_MAIN + 'category/films/comedie-dramatique/'] ) liste.append( ['Documentaire', URL_MAIN", "(aResult[0] == True): total = len(aResult[1]) progress_ = progress().VScreate(SITE_NAME) for aEntry in aResult[1]:", "['Guerre', URL_MAIN + 'category/films/guerre/'] ) liste.append( ['Historique', URL_MAIN + 'category/films/historique/'] ) liste.append( ['Horreur',", "def showList(): oGui = cGui() liste = [] liste.append( ['0-9', URL_MAIN + 'category/series-tv/0-9/']", "oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_LIST[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_LIST[1], 'Séries (Liste)', 'listes.png', oOutputParameterHandler) oOutputParameterHandler =", "style=\"color: #ff9900;\">([^<]+)<|<a class=\"large button.+?\" href=\"([^<>\"]+?)\" target=\"(?:_blank|vid)\"' aResult = oParser.parse(sHtmlContent, sPattern) # Si il", "'category/films/', 'showMovies') MOVIE_VOSTFR = (URL_MAIN + 'category/films/vostfr-films/', 'showMovies') MOVIE_GENRES = (True, 'showGenres') SERIE_SERIES", ") liste.append( ['Animation', URL_MAIN + 'category/films/animation/'] ) liste.append( ['Arts Martiaux', URL_MAIN + 'category/films/arts-martiaux/']", "if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) # pour récuperer", "return if (aResult[0] == True): total = len(aResult[1]) progress_ = progress().VScreate(SITE_NAME) for aEntry", "= oRequestHandler.request() # Magouille pour virer les 3 ligne en trop en cas", "= unicodedata.normalize('NFD', sHtmlContent).encode('ascii', 'ignore').decode('unicode_escape') # vire accent et '\\' sHtmlContent = sHtmlContent.encode('utf-8') #", "def load(): oGui = cGui() oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', 'http://venom/') oGui.addDir(SITE_IDENTIFIER, 'showMoviesSearch', 'Recherche',", 
"liste.append( ['0-9', URL_MAIN + 'category/series-tv/0-9/'] ) liste.append( ['A-B-C', URL_MAIN + 'category/series-tv/a-b-c/'] ) liste.append(", "['Thriller', URL_MAIN + 'category/films/thriller/'] ) liste.append( ['Western', URL_MAIN + 'category/films/western/'] ) liste.append( ['VOSTFR',", "unique # if (aResult[0] == False) and (sLoop == False): # #oGui.setEndOfDirectory() #", "= cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) #", "['Emissions TV', URL_MAIN + 'category/emissions-tv/'] ) for sTitle, sUrl in liste: oOutputParameterHandler =", "'search.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png',", "liste.append( ['Western', URL_MAIN + 'category/films/western/'] ) liste.append( ['VOSTFR', URL_MAIN + 'category/films/vostfr-films/'] ) liste.append(", "aEntry[1] # pour récuperer tous les liens if '&url=' in sHosterUrl: sHosterUrl =", "continue sUrl1 = aEntry[0] sTitle = aEntry[2].replace('Saiosn', 'Saison') if 'Brouillon' in sTitle: sTitle", "= cJheberg().GetUrls(sHosterUrl) if aResult: for aEntry in aResult: sHosterUrl = aEntry oHoster =", "oOutputParameterHandler.addParameter('siteUrl', REPLAYTV_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, REPLAYTV_NEWS[1], 'Emissions TV', 'replay.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMoviesSearch(): oGui =", "Si recherche et trop de resultat, on nettoye if sSearch and total >", "coral]' + sTitle + '[/COLOR]', 'az.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMovies(sSearch = ''): oGui", "pass elif 'quelle-est-votre-serie-preferee' in aEntry[1]: pass elif 'series' in sUrl1 or re.match('.+?saison [0-9]+',", "# Saisons et episodes sUrl = aEntry[2] SXXEX = 
re.search('>(S[0-9]{2}E[0-9]{2})<', sUrl) HOST =", "liste.append( ['Policier', URL_MAIN + 'category/films/policier/'] ) liste.append( ['Romance', URL_MAIN + 'category/films/romance/'] ) liste.append(", "sTitle = re.sub('\\[\\w+]', '', sTitle) sTitle = re.sub('\\[\\w+ \\w+]', '', sTitle) sThumb =", "oGui.addDir(SITE_IDENTIFIER, SERIE_LIST[1], 'Séries (Liste)', 'listes.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VFS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VFS[1],", "'category/series-tv/0-9/'] ) liste.append( ['A-B-C', URL_MAIN + 'category/series-tv/a-b-c/'] ) liste.append( ['D-E-F', URL_MAIN + 'category/series-tv/d-e-f/']", "sUrl1.rsplit('/', 2)[1] sTitle = sTitle.replace('-streaming-telecharger', '').replace('-', ' ') sTitle = sTitle.replace(' [Streaming]', '')", "= cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFR[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VOSTFR[1], 'Séries (VOSTFR)', 'vostfr.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler()", "sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) oGui.setEndOfDirectory() def showHosters(sLoop = False): oGui = cGui() oInputParameterHandler", "SERIE_VOSTFR = (URL_MAIN + 'category/series-tv/series-streaming-vostfr/', 'showMovies') REPLAYTV_NEWS = (URL_MAIN + 'category/emissions-tv/', 'showMovies') REPLAYTV_REPLAYTV", "# -*- coding: utf-8 -*- # Vstream https://github.com/Kodi-vStream/venom-xbmc-addons from resources.lib.gui.hoster import cHosterGui from", "</b>', '') sHtmlContent = sHtmlContent.replace('<b> </b>', ' ') sHtmlContent = sHtmlContent.replace('<b></b>', ' ')", "True): for aEntry in aResult[1]: sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster", "= (True, 'showList') SERIE_VFS = (URL_MAIN + 'category/series-tv/series-streaming-vf/', 'showMovies') SERIE_VOSTFR = (URL_MAIN +", 
"oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl',", "+ 'category/films/historique/'] ) liste.append( ['Horreur', URL_MAIN + 'category/films/horreur/'] ) liste.append( ['Musical', URL_MAIN +", "oGui.addNext(SITE_IDENTIFIER, 'showMovies', '[COLOR teal]Suivant >>>[/COLOR]', oOutputParameterHandler) oGui.setEndOfDirectory() def __checkForNextPage(sHtmlContent): sPattern = '<a class=\"next", "= cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler =", "cParser from resources.lib.util import cUtil from resources.lib.comaddon import progress #, VSlog from resources.lib.multihost", "red]' + aEntry[0] + '[/COLOR]') else: sHosterUrl = aEntry[1] # pour récuperer tous", "'showMoviesSearch', 'Recherche', 'search.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers", "False): oGui = cGui() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')", "resources.lib.parser import cParser from resources.lib.util import cUtil from resources.lib.comaddon import progress #, VSlog", "+ 'category/films/drame/'] ) liste.append( ['Espionnage', URL_MAIN + 'category/films/espionnage/'] ) liste.append( ['Famille', URL_MAIN +", "lien jwplayer(GoogleDrive) if 'filmhdstream' in sHosterUrl: oRequestHandler = cRequestHandler(sHosterUrl) sHtmlContent = oRequestHandler.request() sPattern", "sDisplayTitle = sTitle oOutputParameterHandler = cOutputParameterHandler() 
oOutputParameterHandler.addParameter('siteUrl', sUrl) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) oGui.addMisc(SITE_IDENTIFIER,", "(Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_LIST[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_LIST[1], 'Séries (Liste)',", "in aResult[1]: sHosterUrl = aEntry # pour récuperer tous les liens if '&url='", "resources.lib.handler.inputParameterHandler import cInputParameterHandler from resources.lib.handler.outputParameterHandler import cOutputParameterHandler from resources.lib.handler.requestHandler import cRequestHandler from resources.lib.parser", "if cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0], ''), aEntry[2]) == 0: continue sUrl1 = aEntry[0] sTitle = aEntry[2].replace('Saiosn',", "oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') sPattern = 'href=\"([^\"]+)\"' oParser = cParser()", "aResult: sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle)", "['Famille', URL_MAIN + 'category/films/famille/'] ) liste.append( ['Fantastique', URL_MAIN + 'category/films/fantastique/'] ) liste.append( ['Guerre',", "liste.append( ['Spectacle', URL_MAIN + 'category/films/spectacle/'] ) liste.append( ['Thriller', URL_MAIN + 'category/films/thriller/'] ) liste.append(", "' ') sHtmlContent = sHtmlContent.replace('<b></b>', ' ') sHtmlContent = sHtmlContent.replace('<span class=\"su-lightbox\" data-mfp-src', '<a", "#ff9900;\"><strong>', '<strong><span style=\"color: #ff9900;\">') oParser = cParser() sPattern = '<strong><span style=\"color: #ff9900;\">([^<]+)<|<a class=\"large", "'category/films/fantastique/'] ) liste.append( ['Guerre', URL_MAIN + 
'category/films/guerre/'] ) liste.append( ['Historique', URL_MAIN + 'category/films/historique/']", ") liste.append( ['Aventure', URL_MAIN + 'category/films/aventure-films/'] ) liste.append( ['Biopic', URL_MAIN + 'category/films/biopic/'] )", "!= False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) oGui.setEndOfDirectory() def serieHosters(): oGui =", "cInputParameterHandler from resources.lib.handler.outputParameterHandler import cOutputParameterHandler from resources.lib.handler.requestHandler import cRequestHandler from resources.lib.parser import cParser", ") liste.append( ['Famille', URL_MAIN + 'category/films/famille/'] ) liste.append( ['Fantastique', URL_MAIN + 'category/films/fantastique/'] )", "def serieHosters(): oGui = cGui() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle =", "target=\"(?:_blank|vid)\"' aResult = oParser.parse(sHtmlContent, sPattern) # Si il y a rien a afficher", "SERIE_VFS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VFS[1], 'Séries (VF)', 'vf.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFR[0]) oGui.addDir(SITE_IDENTIFIER,", "sDesc, oOutputParameterHandler) elif 'mangas' in sUrl: oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler)", "= sTitle.replace(' [Telecharger]', '').replace(' [Telechargement]', '') sDisplayTitle = sTitle # on retire la", "peut etre une serie if (len(aResult) == 0) and (sLoop == False): #", "cGui() sSearchText = oGui.showKeyBoard() if (sSearchText != False): sUrl = URL_SEARCH[0] + sSearchText", "oHoster, sHosterUrl, sThumb) oGui.setEndOfDirectory() def serieHosters(): oGui = cGui() oInputParameterHandler = cInputParameterHandler() sUrl", "in sHosterUrl: aResult = cJheberg().GetUrls(sHosterUrl) if aResult: for aEntry in aResult: 
sHosterUrl =", "= (URL_MAIN + 'category/films/vostfr-films/', 'showMovies') MOVIE_GENRES = (True, 'showGenres') SERIE_SERIES = (URL_MAIN +", "double affichage des saisons sTitle = re.sub(' - Saison \\d+', '', sMovieTitle) +", "# serieHosters(True) # return if (aResult[0] == True): total = len(aResult[1]) progress_ =", ") liste.append( ['P-Q-R', URL_MAIN + 'category/series-tv/p-q-r/'] ) liste.append( ['S-T-U', URL_MAIN + 'category/series-tv/s-t-u/'] )", ") liste.append( ['D-E-F', URL_MAIN + 'category/series-tv/d-e-f/'] ) liste.append( ['G-H-I', URL_MAIN + 'category/series-tv/g-h-i/'] )", "'category/films/animation/'] ) liste.append( ['Arts Martiaux', URL_MAIN + 'category/films/arts-martiaux/'] ) liste.append( ['Aventure', URL_MAIN +", "sUrl = aEntry[2] SXXEX = re.search('>(S[0-9]{2}E[0-9]{2})<', sUrl) HOST = re.search('a href=\"https*:\\/\\/([^.]+)', sUrl) if", "import cInputParameterHandler from resources.lib.handler.outputParameterHandler import cOutputParameterHandler from resources.lib.handler.requestHandler import cRequestHandler from resources.lib.parser import", "cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFR[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VOSTFR[1], 'Séries (VOSTFR)', 'vostfr.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl',", "progress_.VSclose(progress_) oGui.setEndOfDirectory() def showHosters(sLoop = False): oGui = cGui() oInputParameterHandler = cInputParameterHandler() sUrl", "(aResult[0] == True): for aEntry in aResult[1]: sHosterUrl = aEntry # pour récuperer", "href=\"([^<>\"]+?)\" target=\"(?:_blank|vid)\"' aResult = oParser.parse(sHtmlContent, sPattern) # Si il y a rien a", "sHtmlContent = sHtmlContent.replace('listes-des-series-annulees-et-renouvelees', '<>') oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0]", ") liste.append( ['Science-Fiction', URL_MAIN + 
'category/films/science-fiction/'] ) liste.append( ['Spectacle', URL_MAIN + 'category/films/spectacle/'] )", "= oInputParameterHandler.getValue('siteUrl') sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\".+?<p>([^<]+)</p>' oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request()", "else: sDesc = aEntry[3].replace('[&hellip;]', '').replace('&hellip;', '...').replace('&rsquo;', '\\'').replace('&#8217;', '\\'').replace('&#8230;', '...') oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl',", "le dernier episode sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\">New</span><b> </b>', '') sHtmlContent = sHtmlContent.replace('<b>", "'Films (Genres)', 'genres.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries (Derniers", "sHtmlContent = oRequestHandler.request() # Magouille pour virer les 3 ligne en trop en", "(sLoop == False): # oGui.setEndOfDirectory() showSeries(True) return if (aResult[0] == True): for aEntry", "oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): for aEntry in aResult[1]: sHosterUrl = aEntry", "'vostfr.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', REPLAYTV_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, REPLAYTV_NEWS[1], 'Emissions TV', 'replay.png', oOutputParameterHandler)", "cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', 'http://venom/') oGui.addDir(SITE_IDENTIFIER, 'showMoviesSearch', 'Recherche', 'search.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0])", "oOutputParameterHandler = cOutputParameterHandler() 
oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showList():", "re.IGNORECASE): oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) elif 'mangas' in sUrl: oGui.addTV(SITE_IDENTIFIER,", "sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\".+?<p>([^<]+)</p>' oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Magouille", "True): for aEntry in aResult[1]: if aEntry[0]: oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] +", "= False): oGui = cGui() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle =", "oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0])", "oRequestHandler.request() sPattern = '<iframe.+?src=\"([^\"]+)\"' aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): for", "URL_MAIN + 'category/films/comedie/'] ) liste.append( ['Comédie Dramatique', URL_MAIN + 'category/films/comedie-dramatique/'] ) liste.append( ['Documentaire',", "New', '') sDisplayTitle = sTitle oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb',", "et trop de resultat, on nettoye if sSearch and total > 2: if", "URL_MAIN + 'category/films/science-fiction/'] ) liste.append( ['Spectacle', URL_MAIN + 'category/films/spectacle/'] ) liste.append( ['Thriller', URL_MAIN", "(oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) 
cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) oGui.setEndOfDirectory() def serieHosters(): oGui", "sThumb = oInputParameterHandler.getValue('sThumb') sPattern = 'href=\"([^\"]+)\"' oParser = cParser() aResult = oParser.parse(sUrl, sPattern)", "ligne en trop en cas de recherche sHtmlContent = sHtmlContent.replace('quelle-est-votre-serie-preferee', '<>') sHtmlContent =", "sHtmlContent.replace('quelle-est-votre-serie-preferee', '<>') sHtmlContent = sHtmlContent.replace('top-series-du-moment', '<>') sHtmlContent = sHtmlContent.replace('listes-des-series-annulees-et-renouvelees', '<>') oParser = cParser()", "récuperer tous les liens if '&url=' in sHosterUrl: sHosterUrl = sHosterUrl.split('&url=')[1] # pour", "cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) oGui.addMisc(SITE_IDENTIFIER, 'serieHosters', sDisplayTitle, '', sThumb, sDesc,", "if (aResult[0] == False): oGui.addText(SITE_IDENTIFIER) if (aResult[0] == True): total = len(aResult[1]) progress_", "+ 'category/series-tv/g-h-i/'] ) liste.append( ['J-K-L', URL_MAIN + 'category/series-tv/j-k-l/'] ) liste.append( ['M-N-O', URL_MAIN +", "sHtmlContent.replace('<b></b>', ' ') sHtmlContent = sHtmlContent.replace('<span class=\"su-lightbox\" data-mfp-src', '<a href') sHtmlContent = sHtmlContent.replace('https://cut-urls.com/st?api=d6e46f2fcd4bfed906a9f3ecbbb6830e862b3afb&amp;url=',", "resultat, on nettoye if sSearch and total > 2: if cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0], ''), aEntry[2])", "trop en cas de recherche sHtmlContent = sHtmlContent.replace('quelle-est-votre-serie-preferee', '<>') sHtmlContent = sHtmlContent.replace('top-series-du-moment', '<>')", "oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + '[/COLOR]') else: # Saisons et episodes sUrl", "'') sDisplayTitle = sTitle oOutputParameterHandler = cOutputParameterHandler() 
oOutputParameterHandler.addParameter('siteUrl', sUrl) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb)", "liste.append( ['P-Q-R', URL_MAIN + 'category/series-tv/p-q-r/'] ) liste.append( ['S-T-U', URL_MAIN + 'category/series-tv/s-t-u/'] ) liste.append(", "liste.append( ['J-K-L', URL_MAIN + 'category/series-tv/j-k-l/'] ) liste.append( ['M-N-O', URL_MAIN + 'category/series-tv/m-n-o/'] ) liste.append(", "oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): return aResult[1][0]", "+ 'category/films/vostfr-films/', 'showMovies') MOVIE_GENRES = (True, 'showGenres') SERIE_SERIES = (URL_MAIN + 'category/series-tv/', 'showMovies')", "'showMovies') SERIE_VOSTFR = (URL_MAIN + 'category/series-tv/series-streaming-vostfr/', 'showMovies') REPLAYTV_NEWS = (URL_MAIN + 'category/emissions-tv/', 'showMovies')", "sTitle, sUrl in liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png',", "sTitle, 'genres.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showList(): oGui = cGui() liste = [] liste.append(", "progress().VScreate(SITE_NAME) for aEntry in aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break if aEntry[0]: #", "+ 'category/series-tv/0-9/'] ) liste.append( ['A-B-C', URL_MAIN + 'category/series-tv/a-b-c/'] ) liste.append( ['D-E-F', URL_MAIN +", "'<>') oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == False): oGui.addText(SITE_IDENTIFIER)", "aEntry # pour récuperer tous les liens if '&url=' in sHosterUrl: sHosterUrl =", "oGui = cGui() oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', 'http://venom/') oGui.addDir(SITE_IDENTIFIER, 'showMoviesSearch', 'Recherche', 'search.png', oOutputParameterHandler)", "oParser = cParser() aResult = 
oParser.parse(sHtmlContent, sPattern) if (aResult[0] == False): oGui.addText(SITE_IDENTIFIER) if", "showGenres(): oGui = cGui() liste = [] liste.append( ['Action', URL_MAIN + 'category/films/action/'] )", "cGui() oParser = cParser() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')", "HOST: HOST = HOST.group(1).split('/')[0] sDisplayTitle = sTitle + ' [COLOR coral]' + HOST.capitalize()", "'[COLOR red]' + aEntry[0] + '[/COLOR]') else: # Saisons et episodes sUrl =", "= cGui() if sSearch: sUrl = sSearch.replace(' ', '+') sPattern = '<div class=\"post-thumbnail\".+?<a", "sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() sHtmlContent", "(URL_MAIN + 'category/series-tv/series-streaming-vf/', 'showMovies') SERIE_VOSTFR = (URL_MAIN + 'category/series-tv/series-streaming-vostfr/', 'showMovies') REPLAYTV_NEWS = (URL_MAIN", "oGui = cGui() if sSearch: sUrl = sSearch.replace(' ', '+') sPattern = '<div", "== 0: continue sUrl1 = aEntry[0] sTitle = aEntry[2].replace('Saiosn', 'Saison') if 'Brouillon' in", "= sTitle # on retire la qualité sTitle = re.sub('\\[\\w+]', '', sTitle) sTitle", "showSeries(True) return if (aResult[0] == True): for aEntry in aResult[1]: if aEntry[0]: oGui.addText(SITE_IDENTIFIER,", "# Si il y a rien a afficher c'est peut etre une serie", "(URL_MAIN + '?s=', 'showMovies') FUNCTION_SEARCH = 'showMovies' def load(): oGui = cGui() oOutputParameterHandler", "sPattern = '<iframe.+?src=\"([^\"]+)\"' aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): for aEntry", "cGui() if sSearch: sUrl = sSearch.replace(' ', '+') sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\"'", "oOutputParameterHandler) progress_.VSclose(progress_) 
if not sSearch: sNextPage = __checkForNextPage(sHtmlContent) if (sNextPage != False): oOutputParameterHandler", "in sTitle: sTitle = sUrl1.rsplit('/', 2)[1] sTitle = sTitle.replace('-streaming-telecharger', '').replace('-', ' ') sTitle", "sTitle # on retire la qualité sTitle = re.sub('\\[\\w+]', '', sTitle) sTitle =", "if (aResult[0] == False) and (sLoop == False): # #oGui.setEndOfDirectory() # serieHosters(True) #", "sHtmlContent = sHtmlContent.replace('<b></b>', ' ') sHtmlContent = sHtmlContent.replace('<span class=\"su-lightbox\" data-mfp-src', '<a href') sHtmlContent", "oOutputParameterHandler.addParameter('siteUrl', sUrl) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) oGui.addMisc(SITE_IDENTIFIER, 'serieHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler)", "= oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): for aEntry in aResult[1]: sHosterUrl =", "= (URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_NEWS = (URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_LIST =", "récupération du Synopsis sDesc = '' try: sPattern = '</p><p style=\"text-align: center;\">([^<]+)</p><p style=\"text-align:", "sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\"><strong>', '<strong><span style=\"color: #ff9900;\">') oParser = cParser() sPattern =", "= '<iframe.+?src=\"([^\"]+)\"' aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): for aEntry in", "href=\"([^\"]+)\"' oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): return", "sTitle) sThumb = aEntry[1] if sSearch: sDesc = '' else: sDesc = aEntry[3].replace('[&hellip;]',", "import cRequestHandler from resources.lib.parser import cParser from resources.lib.util import cUtil from resources.lib.comaddon import", "'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', 
SERIE_LIST[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_LIST[1], 'Séries (Liste)', 'listes.png', oOutputParameterHandler)", "' ') sHtmlContent = sHtmlContent.replace('<span class=\"su-lightbox\" data-mfp-src', '<a href') sHtmlContent = sHtmlContent.replace('https://cut-urls.com/st?api=d6e46f2fcd4bfed906a9f3ecbbb6830e862b3afb&amp;url=', '')", "sHtmlContent.replace('<span class=\"su-lightbox\" data-mfp-src', '<a href') sHtmlContent = sHtmlContent.replace('https://cut-urls.com/st?api=d6e46f2fcd4bfed906a9f3ecbbb6830e862b3afb&amp;url=', '') # récupération du Synopsis", "oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\".+?<p>([^<]+)</p>' oRequestHandler =", "= URL_SEARCH[0] + sSearchText showMovies(sUrl) oGui.setEndOfDirectory() return def showGenres(): oGui = cGui() liste", "'serieHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) oGui.setEndOfDirectory() def showHosters(sLoop = False): oGui", "3 ligne en trop en cas de recherche sHtmlContent = sHtmlContent.replace('quelle-est-votre-serie-preferee', '<>') sHtmlContent", "'...') except: pass sPattern = '<span style=\"color: #33cccc;[^<>\"]*\">(?:<(?:strong|b)>)((?:Stream|Telec)[^<>]+)|\"center\">(.pisode[^<]{2,12})*<(?!\\/a>)([^<>]*a href=\"http.+?)(?:<.p>|<br|<.div)' aResult = oParser.parse(sHtmlContent, sPattern)", "= '<strong><span style=\"color: #ff9900;\">([^<]+)<|<a class=\"large button.+?\" href=\"([^<>\"]+?)\" target=\"(?:_blank|vid)\"' aResult = oParser.parse(sHtmlContent, sPattern) #", "oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Réécriture de sHtmlContent pour récuperer la", "oInputParameterHandler.getValue('sThumb') sPattern = 'href=\"([^\"]+)\"' oParser = cParser() aResult = oParser.parse(sUrl, sPattern) if (aResult[0]", "liste.append( ['VOSTFR', URL_MAIN + 
'category/films/vostfr-films/'] ) liste.append( ['BLURAY 1080p/720p', URL_MAIN + 'category/films/bluray-1080p-720p/'] )", "de sHtmlContent pour récuperer la qualité sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\"><strong>', '<strong><span style=\"color:", "liste.append( ['Science-Fiction', URL_MAIN + 'category/films/science-fiction/'] ) liste.append( ['Spectacle', URL_MAIN + 'category/films/spectacle/'] ) liste.append(", "sHtmlContent).encode('ascii', 'ignore').decode('unicode_escape') # vire accent et '\\' sHtmlContent = sHtmlContent.encode('utf-8') # On remet", "Vstream https://github.com/Kodi-vStream/venom-xbmc-addons from resources.lib.gui.hoster import cHosterGui from resources.lib.gui.gui import cGui from resources.lib.handler.inputParameterHandler import", "HOST.capitalize() + '[/COLOR]' else: sTitle = sMovieTitle + ' ' + aEntry[1].replace(' New',", "re.sub(' - Saison \\d+', '', sMovieTitle) + ' ' + SXXEX.group(1) if HOST:", "'category/films/espionnage/'] ) liste.append( ['Famille', URL_MAIN + 'category/films/famille/'] ) liste.append( ['Fantastique', URL_MAIN + 'category/films/fantastique/']", "oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') sPattern", "liste.append( ['Animation', URL_MAIN + 'category/films/animation/'] ) liste.append( ['Arts Martiaux', URL_MAIN + 'category/films/arts-martiaux/'] )", "'category/films/comedie/'] ) liste.append( ['Comédie Dramatique', URL_MAIN + 'category/films/comedie-dramatique/'] ) liste.append( ['Documentaire', URL_MAIN +", "Tout les meilleurs streaming en illimité.' 
URL_MAIN = 'https://streamingk.net/' MOVIE_NEWS = (URL_MAIN +", "+ 'category/films/thriller/'] ) liste.append( ['Western', URL_MAIN + 'category/films/western/'] ) liste.append( ['VOSTFR', URL_MAIN +", "sUrl: oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) else: oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sDisplayTitle, '',", "sSearch: sUrl = sSearch.replace(' ', '+') sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\"' else: oInputParameterHandler", "REPLAYTV_NEWS[1], 'Emissions TV', 'replay.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMoviesSearch(): oGui = cGui() sSearchText =", "oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) oGui.setEndOfDirectory() def serieHosters(): oGui = cGui() oInputParameterHandler =", "sPattern) if (aResult[0] == True): for aEntry in aResult[1]: sHosterUrl = aEntry oHoster", "aEntry[0] sTitle = aEntry[2].replace('Saiosn', 'Saison') if 'Brouillon' in sTitle: sTitle = sUrl1.rsplit('/', 2)[1]", "liste.append( ['D-E-F', URL_MAIN + 'category/series-tv/d-e-f/'] ) liste.append( ['G-H-I', URL_MAIN + 'category/series-tv/g-h-i/'] ) liste.append(", "(oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) else: oHoster = cHosterGui().checkHoster(sHosterUrl)", "= sHtmlContent.replace('<b> </b>', ' ') sHtmlContent = sHtmlContent.replace('<b></b>', ' ') sHtmlContent = sHtmlContent.replace('<span", "sHtmlContent.encode('utf-8') # On remet en utf-8 # Réécriture de sHtmlContent pour prendre les", "def showGenres(): oGui = cGui() liste = [] liste.append( ['Action', URL_MAIN + 'category/films/action/']", "pour prendre les liens et pour récuperer le dernier episode sHtmlContent = sHtmlContent.replace('<span", "sThumb) # pour récuperer les liens jheberg elif 'jheberg' in 
sHosterUrl: aResult =", "'[/COLOR]') else: sHosterUrl = aEntry[1] # pour récuperer tous les liens if '&url='", "= aEntry # pour récuperer tous les liens if '&url=' in sHosterUrl: sHosterUrl", "'category/films/famille/'] ) liste.append( ['Fantastique', URL_MAIN + 'category/films/fantastique/'] ) liste.append( ['Guerre', URL_MAIN + 'category/films/guerre/']", "cParser() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb')", "else: oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) if not sSearch: sNextPage", "if (aResult[0] == True): return aResult[1][0] return False def showSeries(sLoop = False): oGui", "c'est peut etre une serie if (len(aResult) == 0) and (sLoop == False):", "= cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Magouille pour virer les 3 ligne en", "episode sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\">New</span><b> </b>', '') sHtmlContent = sHtmlContent.replace('<b> </b>', '", "+ 'category/series-tv/s-t-u/'] ) liste.append( ['V-W-X-Y-Z', URL_MAIN + 'category/series-tv/v-w-x-y-z/'] ) for sTitle, sUrl in", "'category/films/bluray-1080p-720p/'] ) liste.append( ['BLURAY 3D', URL_MAIN + 'category/films/bluray-3d/'] ) liste.append( ['Emissions TV', URL_MAIN", "oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFR[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VOSTFR[1], 'Séries (VOSTFR)', 'vostfr.png', oOutputParameterHandler) oOutputParameterHandler", "= cGui() liste = [] liste.append( ['Action', URL_MAIN + 'category/films/action/'] ) liste.append( ['Animation',", "+ 'category/films/spectacle/'] ) liste.append( ['Thriller', URL_MAIN + 'category/films/thriller/'] ) liste.append( ['Western', URL_MAIN +", 
"' ' + aEntry[1].replace(' New', '') sDisplayTitle = sTitle oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl',", "dpstreaming.tv SITE_IDENTIFIER = 'streamingk_com' SITE_NAME = 'StreamingK' SITE_DESC = 'Films, Séries & Mangas", "= sHtmlContent.replace('<span style=\"color: #ff9900;\"><strong>', '<strong><span style=\"color: #ff9900;\">') oParser = cParser() sPattern = '<strong><span", "en utf-8 # Réécriture de sHtmlContent pour prendre les liens et pour récuperer", "sTitle + '[/COLOR]', 'az.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMovies(sSearch = ''): oGui = cGui()", "sThumb = oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Réécriture de sHtmlContent", "aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break if aEntry[0]: # stream ou telechargement oGui.addText(SITE_IDENTIFIER,", "oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() # Magouille pour virer les 3 ligne", "'genres.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showList(): oGui = cGui() liste = [] liste.append( ['0-9',", "dernier episode sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\">New</span><b> </b>', '') sHtmlContent = sHtmlContent.replace('<b> </b>',", "</b>', ' ') sHtmlContent = sHtmlContent.replace('<b></b>', ' ') sHtmlContent = sHtmlContent.replace('<span class=\"su-lightbox\" data-mfp-src',", "import cGui from resources.lib.handler.inputParameterHandler import cInputParameterHandler from resources.lib.handler.outputParameterHandler import cOutputParameterHandler from resources.lib.handler.requestHandler import", "'') sDisplayTitle = sTitle # on retire la qualité sTitle = re.sub('\\[\\w+]', '',", "liste.append( ['Drame', URL_MAIN + 'category/films/drame/'] ) liste.append( ['Espionnage', URL_MAIN + 'category/films/espionnage/'] ) liste.append(", 
"oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VFS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VFS[1], 'Séries (VF)', 'vf.png', oOutputParameterHandler) oOutputParameterHandler =", "sHtmlContent.replace('<b> </b>', ' ') sHtmlContent = sHtmlContent.replace('<b></b>', ' ') sHtmlContent = sHtmlContent.replace('<span class=\"su-lightbox\"", "+ 'category/films/western/'] ) liste.append( ['VOSTFR', URL_MAIN + 'category/films/vostfr-films/'] ) liste.append( ['BLURAY 1080p/720p', URL_MAIN", "Mangas en streaming. Tout les meilleurs streaming en illimité.' URL_MAIN = 'https://streamingk.net/' MOVIE_NEWS", "'Brouillon' in sTitle: sTitle = sUrl1.rsplit('/', 2)[1] sTitle = sTitle.replace('-streaming-telecharger', '').replace('-', ' ')", "aResult = cJheberg().GetUrls(sHosterUrl) if aResult: for aEntry in aResult: sHosterUrl = aEntry oHoster", "if progress_.iscanceled(): break # Si recherche et trop de resultat, on nettoye if", "\\w+]', '', sTitle) sThumb = aEntry[1] if sSearch: sDesc = '' else: sDesc", "class=\"su-lightbox\" data-mfp-src', '<a href') sHtmlContent = sHtmlContent.replace('https://cut-urls.com/st?api=d6e46f2fcd4bfed906a9f3ecbbb6830e862b3afb&amp;url=', '') # récupération du Synopsis sDesc", "+ 'category/films/', 'showMovies') MOVIE_VOSTFR = (URL_MAIN + 'category/films/vostfr-films/', 'showMovies') MOVIE_GENRES = (True, 'showGenres')", "'Séries (Liste)', 'listes.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VFS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VFS[1], 'Séries (VF)',", "oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sNextPage) oGui.addNext(SITE_IDENTIFIER, 'showMovies', '[COLOR teal]Suivant >>>[/COLOR]', oOutputParameterHandler) oGui.setEndOfDirectory() def", "cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) oGui.setEndOfDirectory() def serieHosters(): oGui 
= cGui() oInputParameterHandler = cInputParameterHandler()", "= sTitle.replace(' [Streaming]', '') sTitle = sTitle.replace(' [Telecharger]', '').replace(' [Telechargement]', '') sDisplayTitle =", "HOST = re.search('a href=\"https*:\\/\\/([^.]+)', sUrl) if SXXEX: # on vire le double affichage", "'showSeries', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) elif 'mangas' in sUrl: oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle,", "from resources.lib.handler.outputParameterHandler import cOutputParameterHandler from resources.lib.handler.requestHandler import cRequestHandler from resources.lib.parser import cParser from", "oOutputParameterHandler.addParameter('sThumb', sThumb) oGui.addMisc(SITE_IDENTIFIER, 'serieHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) oGui.setEndOfDirectory() def showHosters(sLoop", "URL_MAIN + 'category/films/musical/'] ) liste.append( ['Policier', URL_MAIN + 'category/films/policier/'] ) liste.append( ['Romance', URL_MAIN", "'quelle-est-votre-serie-preferee' in aEntry[1]: pass elif 'series' in sUrl1 or re.match('.+?saison [0-9]+', sTitle, re.IGNORECASE):", "(sNextPage != False): oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sNextPage) oGui.addNext(SITE_IDENTIFIER, 'showMovies', '[COLOR teal]Suivant >>>[/COLOR]',", "if 'Brouillon' in sTitle: sTitle = sUrl1.rsplit('/', 2)[1] sTitle = sTitle.replace('-streaming-telecharger', '').replace('-', '", "oParser = cParser() sPattern = '<strong><span style=\"color: #ff9900;\">([^<]+)<|<a class=\"large button.+?\" href=\"([^<>\"]+?)\" target=\"(?:_blank|vid)\"' aResult", "sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui,", "'showMovies') MOVIE_MOVIE = (URL_MAIN + 'category/films/', 'showMovies') MOVIE_VOSTFR = (URL_MAIN + 
'category/films/vostfr-films/', 'showMovies')", "= cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler =", "oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + '[/COLOR]') else: sHosterUrl = aEntry[1] # pour", "total = len(aResult[1]) progress_ = progress().VScreate(SITE_NAME) for aEntry in aResult[1]: progress_.VSupdate(progress_, total) if", "# return if (aResult[0] == True): total = len(aResult[1]) progress_ = progress().VScreate(SITE_NAME) for", "from resources.lib.util import cUtil from resources.lib.comaddon import progress #, VSlog from resources.lib.multihost import", "on vire le double affichage des saisons sTitle = re.sub(' - Saison \\d+',", "['Fantastique', URL_MAIN + 'category/films/fantastique/'] ) liste.append( ['Guerre', URL_MAIN + 'category/films/guerre/'] ) liste.append( ['Historique',", "# Réécriture de sHtmlContent pour récuperer la qualité sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\"><strong>',", "= oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() sHtmlContent =", "+ 'category/films/espionnage/'] ) liste.append( ['Famille', URL_MAIN + 'category/films/famille/'] ) liste.append( ['Fantastique', URL_MAIN +", "oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) # pour récuperer les liens jheberg elif 'jheberg'", "'&url=' in sHosterUrl: sHosterUrl = sHosterUrl.split('&url=')[1] # pour récuperer le lien jwplayer(GoogleDrive) if", "# vire accent et '\\' sHtmlContent = sHtmlContent.encode('utf-8') # On remet en utf-8", "les 3 ligne en trop en cas de recherche sHtmlContent = sHtmlContent.replace('quelle-est-votre-serie-preferee', '<>')", "sSearch: sDesc = '' else: sDesc = 
aEntry[3].replace('[&hellip;]', '').replace('&hellip;', '...').replace('&rsquo;', '\\'').replace('&#8217;', '\\'').replace('&#8230;', '...')", "' ' + SXXEX.group(1) if HOST: HOST = HOST.group(1).split('/')[0] sDisplayTitle = sTitle +", "cRequestHandler from resources.lib.parser import cParser from resources.lib.util import cUtil from resources.lib.comaddon import progress", "'showMovies') REPLAYTV_NEWS = (URL_MAIN + 'category/emissions-tv/', 'showMovies') REPLAYTV_REPLAYTV = (URL_MAIN + 'category/emissions-tv/', 'showMovies')", "if (aResult[0] == True): for aEntry in aResult[1]: if aEntry[0]: oGui.addText(SITE_IDENTIFIER, '[COLOR red]'", "= cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl1) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', sThumb) if '-filmographie-streaming' in aEntry[1]: pass", "if (aResult[0] == True): total = len(aResult[1]) progress_ = progress().VScreate(SITE_NAME) for aEntry in", "'series' in sUrl1 or re.match('.+?saison [0-9]+', sTitle, re.IGNORECASE): oGui.addTV(SITE_IDENTIFIER, 'showSeries', sDisplayTitle, '', sThumb,", "sHosterUrl, sThumb) else: oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui,", "URL_MAIN + 'category/films/aventure-films/'] ) liste.append( ['Biopic', URL_MAIN + 'category/films/biopic/'] ) liste.append( ['Comédie', URL_MAIN", "'') # récupération du Synopsis sDesc = '' try: sPattern = '</p><p style=\"text-align:", "sThumb = oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request() sHtmlContent = sHtmlContent.decode('utf-8', \"replace\")", "utf-8 # Réécriture de sHtmlContent pour prendre les liens et pour récuperer le", "if '&url=' in sHosterUrl: sHosterUrl = sHosterUrl.split('&url=')[1] # pour récuperer le lien jwplayer(GoogleDrive)", "else: oHoster = 
cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl,", "True): total = len(aResult[1]) progress_ = progress().VScreate(SITE_NAME) for aEntry in aResult[1]: progress_.VSupdate(progress_, total)", "= oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') sPattern = 'href=\"([^\"]+)\"' oParser =", "+ 'category/series-tv/series-streaming-vf/', 'showMovies') SERIE_VOSTFR = (URL_MAIN + 'category/series-tv/series-streaming-vostfr/', 'showMovies') REPLAYTV_NEWS = (URL_MAIN +", "for sTitle, sUrl in liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', 'Lettres", "cUtil from resources.lib.comaddon import progress #, VSlog from resources.lib.multihost import cJheberg import re,", "href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\"' else: oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\".+?<p>([^<]+)</p>'", "aResult = oParser.parse(sHtmlContent, sPattern) if aResult[0]: sDesc = aResult[1][0] sDesc = sDesc.replace('&#8217;', '\\'').replace('&#8230;',", "Saison \\d+', '', sMovieTitle) + ' ' + SXXEX.group(1) if HOST: HOST =", "['Comédie Dramatique', URL_MAIN + 'category/films/comedie-dramatique/'] ) liste.append( ['Documentaire', URL_MAIN + 'category/documentaire/'] ) liste.append(", "False): # oGui.setEndOfDirectory() showSeries(True) return if (aResult[0] == True): for aEntry in aResult[1]:", "oOutputParameterHandler.addParameter('siteUrl', sUrl1) oOutputParameterHandler.addParameter('sMovieTitle', sTitle) oOutputParameterHandler.addParameter('sThumb', 
sThumb) if '-filmographie-streaming' in aEntry[1]: pass elif 'quelle-est-votre-serie-preferee'", "sUrl1 = aEntry[0] sTitle = aEntry[2].replace('Saiosn', 'Saison') if 'Brouillon' in sTitle: sTitle =", "== 0) and (sLoop == False): # oGui.setEndOfDirectory() showSeries(True) return if (aResult[0] ==", "= oParser.parse(sHtmlContent, sPattern) if (aResult[0] == True): return aResult[1][0] return False def showSeries(sLoop", "'category/films/action/'] ) liste.append( ['Animation', URL_MAIN + 'category/films/animation/'] ) liste.append( ['Arts Martiaux', URL_MAIN +", "= aEntry[1] if sSearch: sDesc = '' else: sDesc = aEntry[3].replace('[&hellip;]', '').replace('&hellip;', '...').replace('&rsquo;',", "saisons sTitle = re.sub(' - Saison \\d+', '', sMovieTitle) + ' ' +", "oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) else: oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster !=", "aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break # Si recherche et trop de resultat,", "'category/series-tv/d-e-f/'] ) liste.append( ['G-H-I', URL_MAIN + 'category/series-tv/g-h-i/'] ) liste.append( ['J-K-L', URL_MAIN + 'category/series-tv/j-k-l/']", "oOutputParameterHandler) oGui.setEndOfDirectory() def showMoviesSearch(): oGui = cGui() sSearchText = oGui.showKeyBoard() if (sSearchText !=", "in aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break # Si recherche et trop de", ") for sTitle, sUrl in liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies',", "= cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') sPattern =", "if aEntry[0]: # stream ou telechargement oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] + 
'[/COLOR]')", "tous les liens if '&url=' in sHosterUrl: sHosterUrl = sHosterUrl.split('&url=')[1] # pour récuperer", "'Recherche', 'search.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)',", "= 'StreamingK' SITE_DESC = 'Films, Séries & Mangas en streaming. Tout les meilleurs", "afficher c'est peut etre une serie if (len(aResult) == 0) and (sLoop ==", "URL_MAIN + 'category/series-tv/m-n-o/'] ) liste.append( ['P-Q-R', URL_MAIN + 'category/series-tv/p-q-r/'] ) liste.append( ['S-T-U', URL_MAIN", ") liste.append( ['Comédie Dramatique', URL_MAIN + 'category/films/comedie-dramatique/'] ) liste.append( ['Documentaire', URL_MAIN + 'category/documentaire/']", "showMovies(sSearch = ''): oGui = cGui() if sSearch: sUrl = sSearch.replace(' ', '+')", "= '' try: sPattern = '</p><p style=\"text-align: center;\">([^<]+)</p><p style=\"text-align: center;\">' aResult = oParser.parse(sHtmlContent,", "serieHosters(True) # return if (aResult[0] == True): total = len(aResult[1]) progress_ = progress().VScreate(SITE_NAME)", "+ aEntry[1].replace(' New', '') sDisplayTitle = sTitle oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oOutputParameterHandler.addParameter('sMovieTitle',", "oOutputParameterHandler.addParameter('siteUrl', SERIE_VFS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VFS[1], 'Séries (VF)', 'vf.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFR[0])", "le lien jwplayer(GoogleDrive) if 'filmhdstream' in sHosterUrl: oRequestHandler = cRequestHandler(sHosterUrl) sHtmlContent = oRequestHandler.request()", "else: # Saisons et episodes sUrl = aEntry[2] SXXEX = re.search('>(S[0-9]{2}E[0-9]{2})<', sUrl) HOST", "['Documentaire', URL_MAIN + 'category/documentaire/'] ) liste.append( 
['Drame', URL_MAIN + 'category/films/drame/'] ) liste.append( ['Espionnage',", "unicodedata.normalize('NFD', sHtmlContent).encode('ascii', 'ignore').decode('unicode_escape') # vire accent et '\\' sHtmlContent = sHtmlContent.encode('utf-8') # On", "') sHtmlContent = sHtmlContent.replace('<span class=\"su-lightbox\" data-mfp-src', '<a href') sHtmlContent = sHtmlContent.replace('https://cut-urls.com/st?api=d6e46f2fcd4bfed906a9f3ecbbb6830e862b3afb&amp;url=', '') #", "oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showList(): oGui = cGui() liste =", ") liste.append( ['Guerre', URL_MAIN + 'category/films/guerre/'] ) liste.append( ['Historique', URL_MAIN + 'category/films/historique/'] )", "'showList') SERIE_VFS = (URL_MAIN + 'category/series-tv/series-streaming-vf/', 'showMovies') SERIE_VOSTFR = (URL_MAIN + 'category/series-tv/series-streaming-vostfr/', 'showMovies')", "and total > 2: if cUtil().CheckOccurence(sSearch.replace(URL_SEARCH[0], ''), aEntry[2]) == 0: continue sUrl1 =", "if HOST: HOST = HOST.group(1).split('/')[0] sDisplayTitle = sTitle + ' [COLOR coral]' +", "break if aEntry[0]: # stream ou telechargement oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0] +", "'?s=', 'showMovies') URL_SEARCH_SERIES = (URL_MAIN + '?s=', 'showMovies') FUNCTION_SEARCH = 'showMovies' def load():", "liens if '&url=' in sHosterUrl: sHosterUrl = sHosterUrl.split('&url=')[1] # pour récuperer le lien", "for aEntry in aResult[1]: sHosterUrl = aEntry # pour récuperer tous les liens", "URL_MAIN + 'category/films/horreur/'] ) liste.append( ['Musical', URL_MAIN + 'category/films/musical/'] ) liste.append( ['Policier', URL_MAIN", "sHosterUrl, sThumb) oGui.setEndOfDirectory() def serieHosters(): oGui = cGui() oInputParameterHandler = cInputParameterHandler() sUrl =", "+ 'category/films/biopic/'] ) liste.append( ['Comédie', URL_MAIN + 'category/films/comedie/'] ) liste.append( ['Comédie Dramatique', URL_MAIN", 
"cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle) cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb) oGui.setEndOfDirectory() def", "TV', 'replay.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMoviesSearch(): oGui = cGui() sSearchText = oGui.showKeyBoard() if", "y a rien a afficher c'est peut etre une serie if (len(aResult) ==", "'showMovies') SERIE_NEWS = (URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_LIST = (True, 'showList') SERIE_VFS =", "['0-9', URL_MAIN + 'category/series-tv/0-9/'] ) liste.append( ['A-B-C', URL_MAIN + 'category/series-tv/a-b-c/'] ) liste.append( ['D-E-F',", "sTitle) sTitle = re.sub('\\[\\w+ \\w+]', '', sTitle) sThumb = aEntry[1] if sSearch: sDesc", "['Historique', URL_MAIN + 'category/films/historique/'] ) liste.append( ['Horreur', URL_MAIN + 'category/films/horreur/'] ) liste.append( ['Musical',", "def showHosters(sLoop = False): oGui = cGui() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl')", "'category/emissions-tv/', 'showMovies') URL_SEARCH = (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_MOVIES = (URL_MAIN + '?s=',", "False def showSeries(sLoop = False): oGui = cGui() oParser = cParser() oInputParameterHandler =", "= HOST.group(1).split('/')[0] sDisplayTitle = sTitle + ' [COLOR coral]' + HOST.capitalize() + '[/COLOR]'", "= re.sub('\\[\\w+ \\w+]', '', sTitle) sThumb = aEntry[1] if sSearch: sDesc = ''", "data-mfp-src', '<a href') sHtmlContent = sHtmlContent.replace('https://cut-urls.com/st?api=d6e46f2fcd4bfed906a9f3ecbbb6830e862b3afb&amp;url=', '') # récupération du Synopsis sDesc =", "in aResult: sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle)", "en streaming. Tout les meilleurs streaming en illimité.' 
URL_MAIN = 'https://streamingk.net/' MOVIE_NEWS =", "+ 'category/emissions-tv/', 'showMovies') REPLAYTV_REPLAYTV = (URL_MAIN + 'category/emissions-tv/', 'showMovies') URL_SEARCH = (URL_MAIN +", "href=\"https*:\\/\\/([^.]+)', sUrl) if SXXEX: # on vire le double affichage des saisons sTitle", "sHtmlContent = unicodedata.normalize('NFD', sHtmlContent).encode('ascii', 'ignore').decode('unicode_escape') # vire accent et '\\' sHtmlContent = sHtmlContent.encode('utf-8')", "= cParser() aResult = oParser.parse(sUrl, sPattern) if (aResult[0] == True): for aEntry in", ") liste.append( ['V-W-X-Y-Z', URL_MAIN + 'category/series-tv/v-w-x-y-z/'] ) for sTitle, sUrl in liste: oOutputParameterHandler", "= sHtmlContent.replace('<span class=\"su-lightbox\" data-mfp-src', '<a href') sHtmlContent = sHtmlContent.replace('https://cut-urls.com/st?api=d6e46f2fcd4bfed906a9f3ecbbb6830e862b3afb&amp;url=', '') # récupération du", "= '<span style=\"color: #33cccc;[^<>\"]*\">(?:<(?:strong|b)>)((?:Stream|Telec)[^<>]+)|\"center\">(.pisode[^<]{2,12})*<(?!\\/a>)([^<>]*a href=\"http.+?)(?:<.p>|<br|<.div)' aResult = oParser.parse(sHtmlContent, sPattern) # astuce en cas", "'category/films/vostfr-films/'] ) liste.append( ['BLURAY 1080p/720p', URL_MAIN + 'category/films/bluray-1080p-720p/'] ) liste.append( ['BLURAY 3D', URL_MAIN", "[Telechargement]', '') sDisplayTitle = sTitle # on retire la qualité sTitle = re.sub('\\[\\w+]',", "oHoster, sHosterUrl, sThumb) # pour récuperer les liens jheberg elif 'jheberg' in sHosterUrl:", "# if (aResult[0] == False) and (sLoop == False): # #oGui.setEndOfDirectory() # serieHosters(True)", "+ 'category/films/policier/'] ) liste.append( ['Romance', URL_MAIN + 'category/films/romance/'] ) liste.append( ['Science-Fiction', URL_MAIN +", "= aResult[1][0] sDesc = sDesc.replace('&#8217;', '\\'').replace('&#8230;', '...') except: pass sPattern = '<span style=\"color:", "= cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sNextPage) 
oGui.addNext(SITE_IDENTIFIER, 'showMovies', '[COLOR teal]Suivant >>>[/COLOR]', oOutputParameterHandler) oGui.setEndOfDirectory() def __checkForNextPage(sHtmlContent):", "elif 'jheberg' in sHosterUrl: aResult = cJheberg().GetUrls(sHosterUrl) if aResult: for aEntry in aResult:", "aEntry in aResult[1]: progress_.VSupdate(progress_, total) if progress_.iscanceled(): break # Si recherche et trop", "URL_MAIN + 'category/films/vostfr-films/'] ) liste.append( ['BLURAY 1080p/720p', URL_MAIN + 'category/films/bluray-1080p-720p/'] ) liste.append( ['BLURAY", "liste.append( ['V-W-X-Y-Z', URL_MAIN + 'category/series-tv/v-w-x-y-z/'] ) for sTitle, sUrl in liste: oOutputParameterHandler =", "URL_MAIN + 'category/films/policier/'] ) liste.append( ['Romance', URL_MAIN + 'category/films/romance/'] ) liste.append( ['Science-Fiction', URL_MAIN", ") liste.append( ['Historique', URL_MAIN + 'category/films/historique/'] ) liste.append( ['Horreur', URL_MAIN + 'category/films/horreur/'] )", "'<strong><span style=\"color: #ff9900;\">') oParser = cParser() sPattern = '<strong><span style=\"color: #ff9900;\">([^<]+)<|<a class=\"large button.+?\"", "aEntry[0] + '[/COLOR]') else: # Saisons et episodes sUrl = aEntry[2] SXXEX =", "URL_MAIN + 'category/emissions-tv/'] ) for sTitle, sUrl in liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl',", "aResult[1]: sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster != False): oHoster.setDisplayName(sMovieTitle) oHoster.setFileName(sMovieTitle)", "SERIE_NEWS[1], 'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_LIST[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_LIST[1],", "= [] liste.append( ['0-9', URL_MAIN + 'category/series-tv/0-9/'] ) liste.append( ['A-B-C', URL_MAIN + 'category/series-tv/a-b-c/']", "oRequestHandler.request() # Magouille pour virer les 3 ligne en 
trop en cas de", "# pour récuperer les liens jheberg elif 'jheberg' in sHosterUrl: aResult = cJheberg().GetUrls(sHosterUrl)", "if sSearch: sDesc = '' else: sDesc = aEntry[3].replace('[&hellip;]', '').replace('&hellip;', '...').replace('&rsquo;', '\\'').replace('&#8217;', '\\'').replace('&#8230;',", "sDesc, oOutputParameterHandler) progress_.VSclose(progress_) oGui.setEndOfDirectory() def showHosters(sLoop = False): oGui = cGui() oInputParameterHandler =", "MOVIE_GENRES = (True, 'showGenres') SERIE_SERIES = (URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_NEWS = (URL_MAIN", "REPLAYTV_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, REPLAYTV_NEWS[1], 'Emissions TV', 'replay.png', oOutputParameterHandler) oGui.setEndOfDirectory() def showMoviesSearch(): oGui = cGui()", "= (URL_MAIN + 'category/emissions-tv/', 'showMovies') URL_SEARCH = (URL_MAIN + '?s=', 'showMovies') URL_SEARCH_MOVIES =", "sHtmlContent = sHtmlContent.replace('<span class=\"su-lightbox\" data-mfp-src', '<a href') sHtmlContent = sHtmlContent.replace('https://cut-urls.com/st?api=d6e46f2fcd4bfed906a9f3ecbbb6830e862b3afb&amp;url=', '') # récupération", "== True): for aEntry in aResult[1]: if aEntry[0]: oGui.addText(SITE_IDENTIFIER, '[COLOR red]' + aEntry[0]", "aResult[0]: sDesc = aResult[1][0] sDesc = sDesc.replace('&#8217;', '\\'').replace('&#8230;', '...') except: pass sPattern =", "__checkForNextPage(sHtmlContent) if (sNextPage != False): oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sNextPage) oGui.addNext(SITE_IDENTIFIER, 'showMovies', '[COLOR", "# On remet en utf-8 # Réécriture de sHtmlContent pour prendre les liens", "(URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_NEWS = (URL_MAIN + 'category/series-tv/', 'showMovies') SERIE_LIST = (True,", "sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler) else: oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler)", "in liste: 
oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl) oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler) oGui.setEndOfDirectory()", "'').replace(' [Telechargement]', '') sDisplayTitle = sTitle # on retire la qualité sTitle =", "la qualité sHtmlContent = sHtmlContent.replace('<span style=\"color: #ff9900;\"><strong>', '<strong><span style=\"color: #ff9900;\">') oParser = cParser()", "= '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\"' else: oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sPattern =", "de sHtmlContent pour prendre les liens et pour récuperer le dernier episode sHtmlContent", "+ 'category/series-tv/v-w-x-y-z/'] ) for sTitle, sUrl in liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', sUrl)", "style=\"color: #ff9900;\">New</span><b> </b>', '') sHtmlContent = sHtmlContent.replace('<b> </b>', ' ') sHtmlContent = sHtmlContent.replace('<b></b>',", "URL_MAIN + 'category/series-tv/v-w-x-y-z/'] ) for sTitle, sUrl in liste: oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl',", "for aEntry in aResult: sHosterUrl = aEntry oHoster = cHosterGui().checkHoster(sHosterUrl) if (oHoster !=", "URL_MAIN + 'category/series-tv/j-k-l/'] ) liste.append( ['M-N-O', URL_MAIN + 'category/series-tv/m-n-o/'] ) liste.append( ['P-Q-R', URL_MAIN", "cGui() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb')", "['Policier', URL_MAIN + 'category/films/policier/'] ) liste.append( ['Romance', URL_MAIN + 'category/films/romance/'] ) liste.append( ['Science-Fiction',", "def showMoviesSearch(): oGui = cGui() sSearchText = 
oGui.showKeyBoard() if (sSearchText != False): sUrl", "'<span style=\"color: #33cccc;[^<>\"]*\">(?:<(?:strong|b)>)((?:Stream|Telec)[^<>]+)|\"center\">(.pisode[^<]{2,12})*<(?!\\/a>)([^<>]*a href=\"http.+?)(?:<.p>|<br|<.div)' aResult = oParser.parse(sHtmlContent, sPattern) # astuce en cas d'episode", ") liste.append( ['Documentaire', URL_MAIN + 'category/documentaire/'] ) liste.append( ['Drame', URL_MAIN + 'category/films/drame/'] )", "oOutputParameterHandler.addParameter('siteUrl', SERIE_VOSTFR[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_VOSTFR[1], 'Séries (VOSTFR)', 'vostfr.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', REPLAYTV_NEWS[0])", "sHtmlContent = sHtmlContent.replace('quelle-est-votre-serie-preferee', '<>') sHtmlContent = sHtmlContent.replace('top-series-du-moment', '<>') sHtmlContent = sHtmlContent.replace('listes-des-series-annulees-et-renouvelees', '<>') oParser", "['BLURAY 1080p/720p', URL_MAIN + 'category/films/bluray-1080p-720p/'] ) liste.append( ['BLURAY 3D', URL_MAIN + 'category/films/bluray-3d/'] )", ") liste.append( ['M-N-O', URL_MAIN + 'category/series-tv/m-n-o/'] ) liste.append( ['P-Q-R', URL_MAIN + 'category/series-tv/p-q-r/'] )", "le double affichage des saisons sTitle = re.sub(' - Saison \\d+', '', sMovieTitle)", "sTitle = sMovieTitle + ' ' + aEntry[1].replace(' New', '') sDisplayTitle = sTitle", "True): for aEntry in aResult[1]: sHosterUrl = aEntry # pour récuperer tous les", "'+') sPattern = '<div class=\"post-thumbnail\".+?<a href=\"([^\"]+)\".+?(?:src=\"([^\"]+(?:png|jpeg|jpg)|)\").+?alt=\"([^\"]+)\"' else: oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl')", "a rien a afficher c'est peut etre une serie if (len(aResult) == 0)", "'[/COLOR]') else: # Saisons et episodes sUrl = aEntry[2] SXXEX = re.search('>(S[0-9]{2}E[0-9]{2})<', sUrl)", "oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sDisplayTitle, '', sThumb, 
sDesc, oOutputParameterHandler) progress_.VSclose(progress_) if not sSearch: sNextPage =", "URL_MAIN + 'category/films/arts-martiaux/'] ) liste.append( ['Aventure', URL_MAIN + 'category/films/aventure-films/'] ) liste.append( ['Biopic', URL_MAIN", "oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb = oInputParameterHandler.getValue('sThumb') oRequestHandler = cRequestHandler(sUrl) sHtmlContent = oRequestHandler.request()", "cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0]) oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler) oOutputParameterHandler = cOutputParameterHandler()", "sHtmlContent.replace('listes-des-series-annulees-et-renouvelees', '<>') oParser = cParser() aResult = oParser.parse(sHtmlContent, sPattern) if (aResult[0] == False):", "href') sHtmlContent = sHtmlContent.replace('https://cut-urls.com/st?api=d6e46f2fcd4bfed906a9f3ecbbb6830e862b3afb&amp;url=', '') # récupération du Synopsis sDesc = '' try:", "sHtmlContent = oRequestHandler.request() # Réécriture de sHtmlContent pour récuperer la qualité sHtmlContent =", "= oRequestHandler.request() # Réécriture de sHtmlContent pour récuperer la qualité sHtmlContent = sHtmlContent.replace('<span", "oGui = cGui() oInputParameterHandler = cInputParameterHandler() sUrl = oInputParameterHandler.getValue('siteUrl') sMovieTitle = oInputParameterHandler.getValue('sMovieTitle') sThumb", "les liens if '&url=' in sHosterUrl: sHosterUrl = sHosterUrl.split('&url=')[1] # pour récuperer le", "from resources.lib.handler.requestHandler import cRequestHandler from resources.lib.parser import cParser from resources.lib.util import cUtil from", "'', sThumb, sDesc, oOutputParameterHandler) progress_.VSclose(progress_) oGui.setEndOfDirectory() def showHosters(sLoop = False): oGui = cGui()", "oGui.addText(SITE_IDENTIFIER) if (aResult[0] == True): total = len(aResult[1]) progress_ 
= progress().VScreate(SITE_NAME) for aEntry", "cGui() oOutputParameterHandler = cOutputParameterHandler() oOutputParameterHandler.addParameter('siteUrl', 'http://venom/') oGui.addDir(SITE_IDENTIFIER, 'showMoviesSearch', 'Recherche', 'search.png', oOutputParameterHandler) oOutputParameterHandler =", "= sHtmlContent.replace('<b></b>', ' ') sHtmlContent = sHtmlContent.replace('<span class=\"su-lightbox\" data-mfp-src', '<a href') sHtmlContent =", "= sHtmlContent.replace('<span style=\"color: #ff9900;\">New</span><b> </b>', '') sHtmlContent = sHtmlContent.replace('<b> </b>', ' ') sHtmlContent", "+ 'category/documentaire/'] ) liste.append( ['Drame', URL_MAIN + 'category/films/drame/'] ) liste.append( ['Espionnage', URL_MAIN +" ]
[ "255], dtype = \"uint8\") def get_skin_region(image): image_BGR = cv2.imread(image) image_HSV = cv2.cvtColor(image_BGR, cv2.COLOR_BGR2HSV)", "get_skin(image): skin_region, image_BGR = get_skin_region(image) image_HSV = cv2.bitwise_and(image_BGR, image_BGR, mask = skin_region) return", "min_HSV = np.array([0, 58, 30], dtype = \"uint8\") max_HSV = np.array([33, 255, 255],", "plt min_HSV = np.array([0, 58, 30], dtype = \"uint8\") max_HSV = np.array([33, 255,", "cv2.cvtColor(image_BGR, cv2.COLOR_BGR2HSV) return cv2.inRange(image_HSV, min_HSV, max_HSV), image_BGR def get_skin(image): skin_region, image_BGR = get_skin_region(image)", "skin_region, image_BGR = get_skin_region(image) image_HSV = cv2.bitwise_and(image_BGR, image_BGR, mask = skin_region) return image_HSV,", "image_HSV = cv2.cvtColor(image_BGR, cv2.COLOR_BGR2HSV) return cv2.inRange(image_HSV, min_HSV, max_HSV), image_BGR def get_skin(image): skin_region, image_BGR", "def get_skin(image): skin_region, image_BGR = get_skin_region(image) image_HSV = cv2.bitwise_and(image_BGR, image_BGR, mask = skin_region)", "np.array([0, 58, 30], dtype = \"uint8\") max_HSV = np.array([33, 255, 255], dtype =", "= np.array([0, 58, 30], dtype = \"uint8\") max_HSV = np.array([33, 255, 255], dtype", "58, 30], dtype = \"uint8\") max_HSV = np.array([33, 255, 255], dtype = \"uint8\")", "max_HSV), image_BGR def get_skin(image): skin_region, image_BGR = get_skin_region(image) image_HSV = cv2.bitwise_and(image_BGR, image_BGR, mask", "matplotlib.pyplot as plt min_HSV = np.array([0, 58, 30], dtype = \"uint8\") max_HSV =", "import cv2 import numpy as np import matplotlib.pyplot as plt min_HSV = np.array([0,", "\"uint8\") def get_skin_region(image): image_BGR = cv2.imread(image) image_HSV = cv2.cvtColor(image_BGR, cv2.COLOR_BGR2HSV) return cv2.inRange(image_HSV, min_HSV,", "= cv2.imread(image) image_HSV = cv2.cvtColor(image_BGR, cv2.COLOR_BGR2HSV) return cv2.inRange(image_HSV, min_HSV, max_HSV), image_BGR def get_skin(image):", "as plt 
min_HSV = np.array([0, 58, 30], dtype = \"uint8\") max_HSV = np.array([33,", "np import matplotlib.pyplot as plt min_HSV = np.array([0, 58, 30], dtype = \"uint8\")", "= get_skin_region(image) image_HSV = cv2.bitwise_and(image_BGR, image_BGR, mask = skin_region) return image_HSV, np.hstack([image_BGR, image_HSV])", "max_HSV = np.array([33, 255, 255], dtype = \"uint8\") def get_skin_region(image): image_BGR = cv2.imread(image)", "cv2.COLOR_BGR2HSV) return cv2.inRange(image_HSV, min_HSV, max_HSV), image_BGR def get_skin(image): skin_region, image_BGR = get_skin_region(image) image_HSV", "cv2.inRange(image_HSV, min_HSV, max_HSV), image_BGR def get_skin(image): skin_region, image_BGR = get_skin_region(image) image_HSV = cv2.bitwise_and(image_BGR,", "dtype = \"uint8\") def get_skin_region(image): image_BGR = cv2.imread(image) image_HSV = cv2.cvtColor(image_BGR, cv2.COLOR_BGR2HSV) return", "import numpy as np import matplotlib.pyplot as plt min_HSV = np.array([0, 58, 30],", "= np.array([33, 255, 255], dtype = \"uint8\") def get_skin_region(image): image_BGR = cv2.imread(image) image_HSV", "numpy as np import matplotlib.pyplot as plt min_HSV = np.array([0, 58, 30], dtype", "import matplotlib.pyplot as plt min_HSV = np.array([0, 58, 30], dtype = \"uint8\") max_HSV", "\"uint8\") max_HSV = np.array([33, 255, 255], dtype = \"uint8\") def get_skin_region(image): image_BGR =", "get_skin_region(image): image_BGR = cv2.imread(image) image_HSV = cv2.cvtColor(image_BGR, cv2.COLOR_BGR2HSV) return cv2.inRange(image_HSV, min_HSV, max_HSV), image_BGR", "dtype = \"uint8\") max_HSV = np.array([33, 255, 255], dtype = \"uint8\") def get_skin_region(image):", "= \"uint8\") max_HSV = np.array([33, 255, 255], dtype = \"uint8\") def get_skin_region(image): image_BGR", "image_BGR = get_skin_region(image) image_HSV = cv2.bitwise_and(image_BGR, image_BGR, mask = skin_region) return image_HSV, np.hstack([image_BGR,", "255, 255], dtype = \"uint8\") def get_skin_region(image): image_BGR = 
cv2.imread(image) image_HSV = cv2.cvtColor(image_BGR,", "np.array([33, 255, 255], dtype = \"uint8\") def get_skin_region(image): image_BGR = cv2.imread(image) image_HSV =", "<filename>modules/skin_detection.py import cv2 import numpy as np import matplotlib.pyplot as plt min_HSV =", "= cv2.cvtColor(image_BGR, cv2.COLOR_BGR2HSV) return cv2.inRange(image_HSV, min_HSV, max_HSV), image_BGR def get_skin(image): skin_region, image_BGR =", "min_HSV, max_HSV), image_BGR def get_skin(image): skin_region, image_BGR = get_skin_region(image) image_HSV = cv2.bitwise_and(image_BGR, image_BGR,", "cv2 import numpy as np import matplotlib.pyplot as plt min_HSV = np.array([0, 58,", "def get_skin_region(image): image_BGR = cv2.imread(image) image_HSV = cv2.cvtColor(image_BGR, cv2.COLOR_BGR2HSV) return cv2.inRange(image_HSV, min_HSV, max_HSV),", "30], dtype = \"uint8\") max_HSV = np.array([33, 255, 255], dtype = \"uint8\") def", "as np import matplotlib.pyplot as plt min_HSV = np.array([0, 58, 30], dtype =", "cv2.imread(image) image_HSV = cv2.cvtColor(image_BGR, cv2.COLOR_BGR2HSV) return cv2.inRange(image_HSV, min_HSV, max_HSV), image_BGR def get_skin(image): skin_region,", "= \"uint8\") def get_skin_region(image): image_BGR = cv2.imread(image) image_HSV = cv2.cvtColor(image_BGR, cv2.COLOR_BGR2HSV) return cv2.inRange(image_HSV,", "image_BGR = cv2.imread(image) image_HSV = cv2.cvtColor(image_BGR, cv2.COLOR_BGR2HSV) return cv2.inRange(image_HSV, min_HSV, max_HSV), image_BGR def", "image_BGR def get_skin(image): skin_region, image_BGR = get_skin_region(image) image_HSV = cv2.bitwise_and(image_BGR, image_BGR, mask =", "return cv2.inRange(image_HSV, min_HSV, max_HSV), image_BGR def get_skin(image): skin_region, image_BGR = get_skin_region(image) image_HSV =" ]
[ "\"head\", \"options\", \"trace\", ] pagination_class = None def initial(self, request, *args, **kwargs): super().initial(request,", "app.pagination import EdgeIdentityPagination from edge_api.identities.serializers import ( EdgeIdentityFeatureStateSerializer, EdgeIdentityFsQueryparamSerializer, EdgeIdentitySerializer, EdgeIdentityTraitsSerializer, ) from", "self.kwargs[\"identity_uuid\"] ) def get_queryset(self): page_size = self.pagination_class().get_page_size(self.request) previous_last_evaluated_key = self.request.GET.get(\"last_evaluated_key\") search_query = self.request.query_params.get(\"q\")", "import Environment from environments.permissions.constants import MANAGE_IDENTITIES from environments.permissions.permissions import NestedEnvironmentPermissions from features.permissions import", "*args, **kwargs): environment = self.get_environment_from_request() if not environment.project.enable_dynamo_db: raise DynamoNotEnabledError() super().initial(request, *args, **kwargs)", "validation_error: raise ValidationError(validation_error) from validation_error identity.update_traits([trait]) Identity.dynamo_wrapper.put_item(build_identity_dict(identity)) data = trait_schema.dump(trait) return Response(data, status=status.HTTP_200_OK)", "build_identity_dict, build_identity_model, ) from rest_framework import status, viewsets from rest_framework.decorators import action from", "q_params_serializer = EdgeIdentityFsQueryparamSerializer( data=self.request.query_params ) q_params_serializer.is_valid(raise_exception=True) identity_features = self.identity.identity_features feature = q_params_serializer.data.get(\"feature\") if", "**kwargs): environment = self.get_environment_from_request() if not environment.project.organisation.persist_trait_data: raise TraitPersistenceError() identity = build_identity_model(self.get_object()) try:", "rest_framework import status, viewsets from rest_framework.decorators import action from rest_framework.exceptions import NotFound, 
ValidationError", "if feature: identity_features = filter( lambda fs: fs.feature.id == feature, identity_features ) serializer", "Identity.dynamo_wrapper.search_items_with_identifier( self.kwargs[\"environment_api_key\"], search_identifier, search_func, page_size, start_key, ) return identity_documents def get_permissions(self): return [", "environments.models import Environment from environments.permissions.constants import MANAGE_IDENTITIES from environments.permissions.permissions import NestedEnvironmentPermissions from features.permissions", "def get_object(self): return Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"identity_uuid\"] ) def get_queryset(self): page_size = self.pagination_class().get_page_size(self.request) previous_last_evaluated_key =", "lookup_field = \"identity_uuid\" dynamo_identifier_search_functions = { \"EQUAL\": lambda identifier: Key(\"identifier\").eq(identifier), \"BEGINS_WITH\": lambda identifier:", "\"update_traits\": MANAGE_IDENTITIES, } ), ] def get_environment_from_request(self): \"\"\" Get environment object from URL", "return Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"]) def perform_destroy(self, instance): Identity.dynamo_wrapper.delete_item(instance[\"composite_key\"]) @swagger_auto_schema( responses={200: EdgeIdentityTraitsSerializer(many=True)}, ) @action(detail=True, methods=[\"get\"], url_path=\"list-traits\")", "feature, identity_features ) serializer = self.get_serializer(identity_features, many=True) return Response(data=serializer.data, status=status.HTTP_200_OK) def perform_destroy(self, instance):", "get_environment_from_request(self): \"\"\" Get environment object from URL parameters in request. 
\"\"\" return Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"])", "request, *args, **kwargs): q_params_serializer = EdgeIdentityFsQueryparamSerializer( data=self.request.query_params ) q_params_serializer.is_valid(raise_exception=True) identity_features = self.identity.identity_features feature", "page_size, start_key, ) return identity_documents def get_permissions(self): return [ IsAuthenticated(), NestedEnvironmentPermissions( action_permission_map={ \"retrieve\":", "filter( lambda fs: fs.feature.id == feature, identity_features ) serializer = self.get_serializer(identity_features, many=True) return", "MANAGE_IDENTITIES, } ), ] def get_environment_from_request(self): \"\"\" Get environment object from URL parameters", "lambda identifier: Key(\"identifier\").eq(identifier), \"BEGINS_WITH\": lambda identifier: Key(\"identifier\").begins_with(identifier), } def initial(self, request, *args, **kwargs):", "is not supported http_method_names = [ \"get\", \"post\", \"put\", \"delete\", \"head\", \"options\", \"trace\",", "featurestate @swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer()) def list(self, request, *args, **kwargs): q_params_serializer = EdgeIdentityFsQueryparamSerializer( data=self.request.query_params ) q_params_serializer.is_valid(raise_exception=True)", "action from rest_framework.exceptions import NotFound, ValidationError from rest_framework.permissions import IsAuthenticated from rest_framework.response import", "\"put\", \"delete\", \"head\", \"options\", \"trace\", ] pagination_class = None def initial(self, request, *args,", ") return identity_documents def get_permissions(self): return [ IsAuthenticated(), NestedEnvironmentPermissions( action_permission_map={ \"retrieve\": MANAGE_IDENTITIES, \"get_traits\":", "\"\") return self.dynamo_identifier_search_functions[\"BEGINS_WITH\"], search_query def get_object(self): return Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"identity_uuid\"] ) 
def get_queryset(self): page_size", "trait_schema.dump(trait) return Response(data, status=status.HTTP_200_OK) class EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet): permission_classes = [IsAuthenticated, IdentityFeatureStatePermissions] lookup_field = \"featurestate_uuid\"", "lambda fs: fs.featurestate_uuid == featurestate_uuid, self.identity.identity_features, ) ) except StopIteration: raise NotFound() return", "rest_framework.response import Response from app.pagination import EdgeIdentityPagination from edge_api.identities.serializers import ( EdgeIdentityFeatureStateSerializer, EdgeIdentityFsQueryparamSerializer,", "initial(self, request, *args, **kwargs): super().initial(request, *args, **kwargs) identity_document = Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"edge_identity_identity_uuid\"] ) self.identity", "self.kwargs[\"environment_api_key\"], search_identifier, search_func, page_size, start_key, ) return identity_documents def get_permissions(self): return [ IsAuthenticated(),", "data = trait_schema.dump(trait) return Response(data, status=status.HTTP_200_OK) class EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet): permission_classes = [IsAuthenticated, IdentityFeatureStatePermissions] lookup_field", "edge_api.identities.serializers import ( EdgeIdentityFeatureStateSerializer, EdgeIdentityFsQueryparamSerializer, EdgeIdentitySerializer, EdgeIdentityTraitsSerializer, ) from environments.identities.models import Identity from", "list(self, request, *args, **kwargs): q_params_serializer = EdgeIdentityFsQueryparamSerializer( data=self.request.query_params ) q_params_serializer.is_valid(raise_exception=True) identity_features = self.identity.identity_features", "methods=[\"put\"], url_path=\"update-traits\") def update_traits(self, request, *args, **kwargs): environment = self.get_environment_from_request() if not environment.project.organisation.persist_trait_data:", "Identity.dynamo_wrapper.get_item_from_uuid_or_404( 
self.kwargs[\"identity_uuid\"] ) def get_queryset(self): page_size = self.pagination_class().get_page_size(self.request) previous_last_evaluated_key = self.request.GET.get(\"last_evaluated_key\") search_query =", "from projects.exceptions import DynamoNotEnabledError from .exceptions import TraitPersistenceError trait_schema = APITraitSchema() class EdgeIdentityViewSet(viewsets.ModelViewSet):", "if previous_last_evaluated_key: start_key = json.loads(base64.b64decode(previous_last_evaluated_key)) if not search_query: return Identity.dynamo_wrapper.get_all_items( self.kwargs[\"environment_api_key\"], page_size, start_key", "[ \"get\", \"post\", \"put\", \"delete\", \"head\", \"options\", \"trace\", ] pagination_class = None def", "url_path=\"update-traits\") def update_traits(self, request, *args, **kwargs): environment = self.get_environment_from_request() if not environment.project.organisation.persist_trait_data: raise", "] def get_environment_from_request(self): \"\"\" Get environment object from URL parameters in request. 
\"\"\"", "= trait_schema.dump(identity[\"identity_traits\"], many=True) return Response(data=data, status=status.HTTP_200_OK) @swagger_auto_schema( method=\"put\", request_body=EdgeIdentityTraitsSerializer, responses={200: EdgeIdentityTraitsSerializer()}, ) @action(detail=True,", "self.kwargs[\"featurestate_uuid\"] try: featurestate = next( filter( lambda fs: fs.featurestate_uuid == featurestate_uuid, self.identity.identity_features, )", "== feature, identity_features ) serializer = self.get_serializer(identity_features, many=True) return Response(data=serializer.data, status=status.HTTP_200_OK) def perform_destroy(self,", "} ), ] def get_environment_from_request(self): \"\"\" Get environment object from URL parameters in", "feature = q_params_serializer.data.get(\"feature\") if feature: identity_features = filter( lambda fs: fs.feature.id == feature,", "Identity.dynamo_wrapper.delete_item(instance[\"composite_key\"]) @swagger_auto_schema( responses={200: EdgeIdentityTraitsSerializer(many=True)}, ) @action(detail=True, methods=[\"get\"], url_path=\"list-traits\") def get_traits(self, request, *args, **kwargs):", "if not search_query: return Identity.dynamo_wrapper.get_all_items( self.kwargs[\"environment_api_key\"], page_size, start_key ) search_func, search_identifier = self._get_search_function_and_value(", "environment = self.get_environment_from_request() if not environment.project.enable_dynamo_db: raise DynamoNotEnabledError() super().initial(request, *args, **kwargs) def _get_search_function_and_value(", "import base64 import json import typing import marshmallow from boto3.dynamodb.conditions import Key from", "from flag_engine.api.schemas import APITraitSchema from flag_engine.identities.builders import ( build_identity_dict, build_identity_model, ) from rest_framework", "**kwargs) def _get_search_function_and_value( self, search_query: str, ) -> typing.Tuple[typing.Callable, str]: if search_query.startswith('\"') and", "import 
IdentityFeatureStatePermissions from projects.exceptions import DynamoNotEnabledError from .exceptions import TraitPersistenceError trait_schema = APITraitSchema()", "pagination_class = EdgeIdentityPagination lookup_field = \"identity_uuid\" dynamo_identifier_search_functions = { \"EQUAL\": lambda identifier: Key(\"identifier\").eq(identifier),", "{ \"EQUAL\": lambda identifier: Key(\"identifier\").eq(identifier), \"BEGINS_WITH\": lambda identifier: Key(\"identifier\").begins_with(identifier), } def initial(self, request,", "import MANAGE_IDENTITIES from environments.permissions.permissions import NestedEnvironmentPermissions from features.permissions import IdentityFeatureStatePermissions from projects.exceptions import", "trait_schema = APITraitSchema() class EdgeIdentityViewSet(viewsets.ModelViewSet): serializer_class = EdgeIdentitySerializer pagination_class = EdgeIdentityPagination lookup_field =", "perform_destroy(self, instance): Identity.dynamo_wrapper.delete_item(instance[\"composite_key\"]) @swagger_auto_schema( responses={200: EdgeIdentityTraitsSerializer(many=True)}, ) @action(detail=True, methods=[\"get\"], url_path=\"list-traits\") def get_traits(self, request,", "self.get_object() data = trait_schema.dump(identity[\"identity_traits\"], many=True) return Response(data=data, status=status.HTTP_200_OK) @swagger_auto_schema( method=\"put\", request_body=EdgeIdentityTraitsSerializer, responses={200: EdgeIdentityTraitsSerializer()},", "= self._get_search_function_and_value( search_query ) identity_documents = Identity.dynamo_wrapper.search_items_with_identifier( self.kwargs[\"environment_api_key\"], search_identifier, search_func, page_size, start_key, )", "def list(self, request, *args, **kwargs): q_params_serializer = EdgeIdentityFsQueryparamSerializer( data=self.request.query_params ) q_params_serializer.is_valid(raise_exception=True) identity_features =", "Key from drf_yasg2.utils import swagger_auto_schema from flag_engine.api.schemas import 
APITraitSchema from flag_engine.identities.builders import (", "= EdgeIdentityPagination lookup_field = \"identity_uuid\" dynamo_identifier_search_functions = { \"EQUAL\": lambda identifier: Key(\"identifier\").eq(identifier), \"BEGINS_WITH\":", "return Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"identity_uuid\"] ) def get_queryset(self): page_size = self.pagination_class().get_page_size(self.request) previous_last_evaluated_key = self.request.GET.get(\"last_evaluated_key\") search_query", "from environments.identities.models import Identity from environments.models import Environment from environments.permissions.constants import MANAGE_IDENTITIES from", "self.request.query_params.get(\"q\") start_key = None if previous_last_evaluated_key: start_key = json.loads(base64.b64decode(previous_last_evaluated_key)) if not search_query: return", "request_body=EdgeIdentityTraitsSerializer, responses={200: EdgeIdentityTraitsSerializer()}, ) @action(detail=True, methods=[\"put\"], url_path=\"update-traits\") def update_traits(self, request, *args, **kwargs): environment", "def get_environment_from_request(self): \"\"\" Get environment object from URL parameters in request. 
\"\"\" return", "filter( lambda fs: fs.featurestate_uuid == featurestate_uuid, self.identity.identity_features, ) ) except StopIteration: raise NotFound()", "= self.request.query_params.get(\"q\") start_key = None if previous_last_evaluated_key: start_key = json.loads(base64.b64decode(previous_last_evaluated_key)) if not search_query:", "Key(\"identifier\").begins_with(identifier), } def initial(self, request, *args, **kwargs): environment = self.get_environment_from_request() if not environment.project.enable_dynamo_db:", "identity.update_traits([trait]) Identity.dynamo_wrapper.put_item(build_identity_dict(identity)) data = trait_schema.dump(trait) return Response(data, status=status.HTTP_200_OK) class EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet): permission_classes = [IsAuthenticated,", "not supported http_method_names = [ \"get\", \"post\", \"put\", \"delete\", \"head\", \"options\", \"trace\", ]", "start_key = None if previous_last_evaluated_key: start_key = json.loads(base64.b64decode(previous_last_evaluated_key)) if not search_query: return Identity.dynamo_wrapper.get_all_items(", "return Identity.dynamo_wrapper.get_all_items( self.kwargs[\"environment_api_key\"], page_size, start_key ) search_func, search_identifier = self._get_search_function_and_value( search_query ) identity_documents", "Environment from environments.permissions.constants import MANAGE_IDENTITIES from environments.permissions.permissions import NestedEnvironmentPermissions from features.permissions import IdentityFeatureStatePermissions", ") from environments.identities.models import Identity from environments.models import Environment from environments.permissions.constants import MANAGE_IDENTITIES", "environments.permissions.permissions import NestedEnvironmentPermissions from features.permissions import IdentityFeatureStatePermissions from projects.exceptions import DynamoNotEnabledError from .exceptions", "import NestedEnvironmentPermissions from features.permissions import 
IdentityFeatureStatePermissions from projects.exceptions import DynamoNotEnabledError from .exceptions import", "from drf_yasg2.utils import swagger_auto_schema from flag_engine.api.schemas import APITraitSchema from flag_engine.identities.builders import ( build_identity_dict,", "*args, **kwargs) def _get_search_function_and_value( self, search_query: str, ) -> typing.Tuple[typing.Callable, str]: if search_query.startswith('\"')", ") except StopIteration: raise NotFound() return featurestate @swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer()) def list(self, request, *args, **kwargs):", "[ IsAuthenticated(), NestedEnvironmentPermissions( action_permission_map={ \"retrieve\": MANAGE_IDENTITIES, \"get_traits\": MANAGE_IDENTITIES, \"update_traits\": MANAGE_IDENTITIES, } ), ]", "search_query.endswith('\"'): return self.dynamo_identifier_search_functions[ \"EQUAL\" ], search_query.replace('\"', \"\") return self.dynamo_identifier_search_functions[\"BEGINS_WITH\"], search_query def get_object(self): return", "search_query def get_object(self): return Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"identity_uuid\"] ) def get_queryset(self): page_size = self.pagination_class().get_page_size(self.request) previous_last_evaluated_key", "previous_last_evaluated_key = self.request.GET.get(\"last_evaluated_key\") search_query = self.request.query_params.get(\"q\") start_key = None if previous_last_evaluated_key: start_key =", "responses={200: EdgeIdentityTraitsSerializer(many=True)}, ) @action(detail=True, methods=[\"get\"], url_path=\"list-traits\") def get_traits(self, request, *args, **kwargs): identity =", ") -> typing.Tuple[typing.Callable, str]: if search_query.startswith('\"') and search_query.endswith('\"'): return self.dynamo_identifier_search_functions[ \"EQUAL\" ], search_query.replace('\"',", "return self.dynamo_identifier_search_functions[\"BEGINS_WITH\"], search_query def get_object(self): return 
Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"identity_uuid\"] ) def get_queryset(self): page_size =", "build_identity_model, ) from rest_framework import status, viewsets from rest_framework.decorators import action from rest_framework.exceptions", "NestedEnvironmentPermissions from features.permissions import IdentityFeatureStatePermissions from projects.exceptions import DynamoNotEnabledError from .exceptions import TraitPersistenceError", "search_query: return Identity.dynamo_wrapper.get_all_items( self.kwargs[\"environment_api_key\"], page_size, start_key ) search_func, search_identifier = self._get_search_function_and_value( search_query )", "return identity_documents def get_permissions(self): return [ IsAuthenticated(), NestedEnvironmentPermissions( action_permission_map={ \"retrieve\": MANAGE_IDENTITIES, \"get_traits\": MANAGE_IDENTITIES,", "@swagger_auto_schema( responses={200: EdgeIdentityTraitsSerializer(many=True)}, ) @action(detail=True, methods=[\"get\"], url_path=\"list-traits\") def get_traits(self, request, *args, **kwargs): identity", "), ] def get_environment_from_request(self): \"\"\" Get environment object from URL parameters in request.", "return [ IsAuthenticated(), NestedEnvironmentPermissions( action_permission_map={ \"retrieve\": MANAGE_IDENTITIES, \"get_traits\": MANAGE_IDENTITIES, \"update_traits\": MANAGE_IDENTITIES, } ),", "= filter( lambda fs: fs.feature.id == feature, identity_features ) serializer = self.get_serializer(identity_features, many=True)", "} def initial(self, request, *args, **kwargs): environment = self.get_environment_from_request() if not environment.project.enable_dynamo_db: raise", "start_key ) search_func, search_identifier = self._get_search_function_and_value( search_query ) identity_documents = Identity.dynamo_wrapper.search_items_with_identifier( self.kwargs[\"environment_api_key\"], search_identifier,", "request, *args, **kwargs): environment = self.get_environment_from_request() if not 
environment.project.enable_dynamo_db: raise DynamoNotEnabledError() super().initial(request, *args,", "instance): Identity.dynamo_wrapper.delete_item(instance[\"composite_key\"]) @swagger_auto_schema( responses={200: EdgeIdentityTraitsSerializer(many=True)}, ) @action(detail=True, methods=[\"get\"], url_path=\"list-traits\") def get_traits(self, request, *args,", "data = trait_schema.dump(identity[\"identity_traits\"], many=True) return Response(data=data, status=status.HTTP_200_OK) @swagger_auto_schema( method=\"put\", request_body=EdgeIdentityTraitsSerializer, responses={200: EdgeIdentityTraitsSerializer()}, )", ") serializer = self.get_serializer(identity_features, many=True) return Response(data=serializer.data, status=status.HTTP_200_OK) def perform_destroy(self, instance): self.identity.identity_features.remove(instance) Identity.dynamo_wrapper.put_item(build_identity_dict(self.identity))", "search_func, page_size, start_key, ) return identity_documents def get_permissions(self): return [ IsAuthenticated(), NestedEnvironmentPermissions( action_permission_map={", "from features.permissions import IdentityFeatureStatePermissions from projects.exceptions import DynamoNotEnabledError from .exceptions import TraitPersistenceError trait_schema", "json.loads(base64.b64decode(previous_last_evaluated_key)) if not search_query: return Identity.dynamo_wrapper.get_all_items( self.kwargs[\"environment_api_key\"], page_size, start_key ) search_func, search_identifier =", "identity_documents = Identity.dynamo_wrapper.search_items_with_identifier( self.kwargs[\"environment_api_key\"], search_identifier, search_func, page_size, start_key, ) return identity_documents def get_permissions(self):", "from .exceptions import TraitPersistenceError trait_schema = APITraitSchema() class EdgeIdentityViewSet(viewsets.ModelViewSet): serializer_class = EdgeIdentitySerializer pagination_class", "raise TraitPersistenceError() identity = build_identity_model(self.get_object()) try: trait = 
trait_schema.load(request.data) except marshmallow.ValidationError as validation_error:", "\"\"\" Get environment object from URL parameters in request. \"\"\" return Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"]) def", "NotFound() return featurestate @swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer()) def list(self, request, *args, **kwargs): q_params_serializer = EdgeIdentityFsQueryparamSerializer( data=self.request.query_params", "self.pagination_class().get_page_size(self.request) previous_last_evaluated_key = self.request.GET.get(\"last_evaluated_key\") search_query = self.request.query_params.get(\"q\") start_key = None if previous_last_evaluated_key: start_key", "import status, viewsets from rest_framework.decorators import action from rest_framework.exceptions import NotFound, ValidationError from", "from environments.permissions.constants import MANAGE_IDENTITIES from environments.permissions.permissions import NestedEnvironmentPermissions from features.permissions import IdentityFeatureStatePermissions from", "data=self.request.query_params ) q_params_serializer.is_valid(raise_exception=True) identity_features = self.identity.identity_features feature = q_params_serializer.data.get(\"feature\") if feature: identity_features =", "action_permission_map={ \"retrieve\": MANAGE_IDENTITIES, \"get_traits\": MANAGE_IDENTITIES, \"update_traits\": MANAGE_IDENTITIES, } ), ] def get_environment_from_request(self): \"\"\"", "build_identity_model(identity_document) def get_object(self): featurestate_uuid = self.kwargs[\"featurestate_uuid\"] try: featurestate = next( filter( lambda fs:", "@swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer()) def list(self, request, *args, **kwargs): q_params_serializer = EdgeIdentityFsQueryparamSerializer( data=self.request.query_params ) q_params_serializer.is_valid(raise_exception=True) identity_features", "Get environment object from URL parameters in request. 
\"\"\" return Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"]) def perform_destroy(self,", "search_query ) identity_documents = Identity.dynamo_wrapper.search_items_with_identifier( self.kwargs[\"environment_api_key\"], search_identifier, search_func, page_size, start_key, ) return identity_documents", "rest_framework.exceptions import NotFound, ValidationError from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from", "responses={200: EdgeIdentityTraitsSerializer()}, ) @action(detail=True, methods=[\"put\"], url_path=\"update-traits\") def update_traits(self, request, *args, **kwargs): environment =", "= { \"EQUAL\": lambda identifier: Key(\"identifier\").eq(identifier), \"BEGINS_WITH\": lambda identifier: Key(\"identifier\").begins_with(identifier), } def initial(self,", "MANAGE_IDENTITIES, \"get_traits\": MANAGE_IDENTITIES, \"update_traits\": MANAGE_IDENTITIES, } ), ] def get_environment_from_request(self): \"\"\" Get environment", "search_query.startswith('\"') and search_query.endswith('\"'): return self.dynamo_identifier_search_functions[ \"EQUAL\" ], search_query.replace('\"', \"\") return self.dynamo_identifier_search_functions[\"BEGINS_WITH\"], search_query def", ") from rest_framework import status, viewsets from rest_framework.decorators import action from rest_framework.exceptions import", "self.request.GET.get(\"last_evaluated_key\") search_query = self.request.query_params.get(\"q\") start_key = None if previous_last_evaluated_key: start_key = json.loads(base64.b64decode(previous_last_evaluated_key)) if", "= self.get_environment_from_request() if not environment.project.enable_dynamo_db: raise DynamoNotEnabledError() super().initial(request, *args, **kwargs) def _get_search_function_and_value( self,", "identity_features = filter( lambda fs: fs.feature.id == feature, identity_features ) serializer = self.get_serializer(identity_features,", "import IsAuthenticated from 
rest_framework.response import Response from app.pagination import EdgeIdentityPagination from edge_api.identities.serializers import", "featurestate_uuid = self.kwargs[\"featurestate_uuid\"] try: featurestate = next( filter( lambda fs: fs.featurestate_uuid == featurestate_uuid,", "swagger_auto_schema from flag_engine.api.schemas import APITraitSchema from flag_engine.identities.builders import ( build_identity_dict, build_identity_model, ) from", "featurestate_uuid, self.identity.identity_features, ) ) except StopIteration: raise NotFound() return featurestate @swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer()) def list(self,", "EdgeIdentitySerializer pagination_class = EdgeIdentityPagination lookup_field = \"identity_uuid\" dynamo_identifier_search_functions = { \"EQUAL\": lambda identifier:", "get_queryset(self): page_size = self.pagination_class().get_page_size(self.request) previous_last_evaluated_key = self.request.GET.get(\"last_evaluated_key\") search_query = self.request.query_params.get(\"q\") start_key = None", "status=status.HTTP_200_OK) class EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet): permission_classes = [IsAuthenticated, IdentityFeatureStatePermissions] lookup_field = \"featurestate_uuid\" serializer_class = EdgeIdentityFeatureStateSerializer", "get_traits(self, request, *args, **kwargs): identity = self.get_object() data = trait_schema.dump(identity[\"identity_traits\"], many=True) return Response(data=data,", "serializer_class = EdgeIdentitySerializer pagination_class = EdgeIdentityPagination lookup_field = \"identity_uuid\" dynamo_identifier_search_functions = { \"EQUAL\":", "\"BEGINS_WITH\": lambda identifier: Key(\"identifier\").begins_with(identifier), } def initial(self, request, *args, **kwargs): environment = self.get_environment_from_request()", "projects.exceptions import DynamoNotEnabledError from .exceptions import TraitPersistenceError trait_schema = APITraitSchema() class 
EdgeIdentityViewSet(viewsets.ModelViewSet): serializer_class", "import ( build_identity_dict, build_identity_model, ) from rest_framework import status, viewsets from rest_framework.decorators import", "parameters in request. \"\"\" return Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"]) def perform_destroy(self, instance): Identity.dynamo_wrapper.delete_item(instance[\"composite_key\"]) @swagger_auto_schema( responses={200: EdgeIdentityTraitsSerializer(many=True)},", "search_identifier, search_func, page_size, start_key, ) return identity_documents def get_permissions(self): return [ IsAuthenticated(), NestedEnvironmentPermissions(", "= self.request.GET.get(\"last_evaluated_key\") search_query = self.request.query_params.get(\"q\") start_key = None if previous_last_evaluated_key: start_key = json.loads(base64.b64decode(previous_last_evaluated_key))", "[IsAuthenticated, IdentityFeatureStatePermissions] lookup_field = \"featurestate_uuid\" serializer_class = EdgeIdentityFeatureStateSerializer # Patch is not supported", "q_params_serializer.data.get(\"feature\") if feature: identity_features = filter( lambda fs: fs.feature.id == feature, identity_features )", "== featurestate_uuid, self.identity.identity_features, ) ) except StopIteration: raise NotFound() return featurestate @swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer()) def", "EdgeIdentityTraitsSerializer()}, ) @action(detail=True, methods=[\"put\"], url_path=\"update-traits\") def update_traits(self, request, *args, **kwargs): environment = self.get_environment_from_request()", "lambda fs: fs.feature.id == feature, identity_features ) serializer = self.get_serializer(identity_features, many=True) return Response(data=serializer.data,", "= APITraitSchema() class EdgeIdentityViewSet(viewsets.ModelViewSet): serializer_class = EdgeIdentitySerializer pagination_class = EdgeIdentityPagination lookup_field = \"identity_uuid\"", "IdentityFeatureStatePermissions] 
lookup_field = \"featurestate_uuid\" serializer_class = EdgeIdentityFeatureStateSerializer # Patch is not supported http_method_names", "from rest_framework import status, viewsets from rest_framework.decorators import action from rest_framework.exceptions import NotFound,", "object from URL parameters in request. \"\"\" return Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"]) def perform_destroy(self, instance): Identity.dynamo_wrapper.delete_item(instance[\"composite_key\"])", "get_object(self): return Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"identity_uuid\"] ) def get_queryset(self): page_size = self.pagination_class().get_page_size(self.request) previous_last_evaluated_key = self.request.GET.get(\"last_evaluated_key\")", "Patch is not supported http_method_names = [ \"get\", \"post\", \"put\", \"delete\", \"head\", \"options\",", "Identity from environments.models import Environment from environments.permissions.constants import MANAGE_IDENTITIES from environments.permissions.permissions import NestedEnvironmentPermissions", "\"delete\", \"head\", \"options\", \"trace\", ] pagination_class = None def initial(self, request, *args, **kwargs):", "\"\"\" return Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"]) def perform_destroy(self, instance): Identity.dynamo_wrapper.delete_item(instance[\"composite_key\"]) @swagger_auto_schema( responses={200: EdgeIdentityTraitsSerializer(many=True)}, ) @action(detail=True, methods=[\"get\"],", "serializer_class = EdgeIdentityFeatureStateSerializer # Patch is not supported http_method_names = [ \"get\", \"post\",", "http_method_names = [ \"get\", \"post\", \"put\", \"delete\", \"head\", \"options\", \"trace\", ] pagination_class =", "= Identity.dynamo_wrapper.search_items_with_identifier( self.kwargs[\"environment_api_key\"], search_identifier, search_func, page_size, start_key, ) return identity_documents def get_permissions(self): return", "def initial(self, 
request, *args, **kwargs): super().initial(request, *args, **kwargs) identity_document = Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"edge_identity_identity_uuid\"] )", "Response from app.pagination import EdgeIdentityPagination from edge_api.identities.serializers import ( EdgeIdentityFeatureStateSerializer, EdgeIdentityFsQueryparamSerializer, EdgeIdentitySerializer, EdgeIdentityTraitsSerializer,", "import NotFound, ValidationError from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from app.pagination", ") identity_documents = Identity.dynamo_wrapper.search_items_with_identifier( self.kwargs[\"environment_api_key\"], search_identifier, search_func, page_size, start_key, ) return identity_documents def", "-> typing.Tuple[typing.Callable, str]: if search_query.startswith('\"') and search_query.endswith('\"'): return self.dynamo_identifier_search_functions[ \"EQUAL\" ], search_query.replace('\"', \"\")", "**kwargs) identity_document = Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"edge_identity_identity_uuid\"] ) self.identity = build_identity_model(identity_document) def get_object(self): featurestate_uuid =", "request, *args, **kwargs): super().initial(request, *args, **kwargs) identity_document = Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"edge_identity_identity_uuid\"] ) self.identity =", "Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"]) def perform_destroy(self, instance): Identity.dynamo_wrapper.delete_item(instance[\"composite_key\"]) @swagger_auto_schema( responses={200: EdgeIdentityTraitsSerializer(many=True)}, ) @action(detail=True, methods=[\"get\"], url_path=\"list-traits\") def", "import ( EdgeIdentityFeatureStateSerializer, EdgeIdentityFsQueryparamSerializer, EdgeIdentitySerializer, EdgeIdentityTraitsSerializer, ) from environments.identities.models import Identity from environments.models", "identity_features = 
self.identity.identity_features feature = q_params_serializer.data.get(\"feature\") if feature: identity_features = filter( lambda fs:", "from URL parameters in request. \"\"\" return Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"]) def perform_destroy(self, instance): Identity.dynamo_wrapper.delete_item(instance[\"composite_key\"]) @swagger_auto_schema(", "**kwargs): identity = self.get_object() data = trait_schema.dump(identity[\"identity_traits\"], many=True) return Response(data=data, status=status.HTTP_200_OK) @swagger_auto_schema( method=\"put\",", "def get_traits(self, request, *args, **kwargs): identity = self.get_object() data = trait_schema.dump(identity[\"identity_traits\"], many=True) return", "NotFound, ValidationError from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from app.pagination import", "class EdgeIdentityViewSet(viewsets.ModelViewSet): serializer_class = EdgeIdentitySerializer pagination_class = EdgeIdentityPagination lookup_field = \"identity_uuid\" dynamo_identifier_search_functions =", "def perform_destroy(self, instance): Identity.dynamo_wrapper.delete_item(instance[\"composite_key\"]) @swagger_auto_schema( responses={200: EdgeIdentityTraitsSerializer(many=True)}, ) @action(detail=True, methods=[\"get\"], url_path=\"list-traits\") def get_traits(self,", "import marshmallow from boto3.dynamodb.conditions import Key from drf_yasg2.utils import swagger_auto_schema from flag_engine.api.schemas import", "self.identity.identity_features feature = q_params_serializer.data.get(\"feature\") if feature: identity_features = filter( lambda fs: fs.feature.id ==", "identifier: Key(\"identifier\").eq(identifier), \"BEGINS_WITH\": lambda identifier: Key(\"identifier\").begins_with(identifier), } def initial(self, request, *args, **kwargs): environment", "\"post\", \"put\", \"delete\", \"head\", \"options\", \"trace\", ] pagination_class = None def initial(self, request,", "= 
self.get_object() data = trait_schema.dump(identity[\"identity_traits\"], many=True) return Response(data=data, status=status.HTTP_200_OK) @swagger_auto_schema( method=\"put\", request_body=EdgeIdentityTraitsSerializer, responses={200:", "@action(detail=True, methods=[\"put\"], url_path=\"update-traits\") def update_traits(self, request, *args, **kwargs): environment = self.get_environment_from_request() if not", "= build_identity_model(self.get_object()) try: trait = trait_schema.load(request.data) except marshmallow.ValidationError as validation_error: raise ValidationError(validation_error) from", "request. \"\"\" return Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"]) def perform_destroy(self, instance): Identity.dynamo_wrapper.delete_item(instance[\"composite_key\"]) @swagger_auto_schema( responses={200: EdgeIdentityTraitsSerializer(many=True)}, ) @action(detail=True,", "identity = build_identity_model(self.get_object()) try: trait = trait_schema.load(request.data) except marshmallow.ValidationError as validation_error: raise ValidationError(validation_error)", "= [ \"get\", \"post\", \"put\", \"delete\", \"head\", \"options\", \"trace\", ] pagination_class = None", "EdgeIdentityTraitsSerializer(many=True)}, ) @action(detail=True, methods=[\"get\"], url_path=\"list-traits\") def get_traits(self, request, *args, **kwargs): identity = self.get_object()", "return featurestate @swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer()) def list(self, request, *args, **kwargs): q_params_serializer = EdgeIdentityFsQueryparamSerializer( data=self.request.query_params )", "environments.identities.models import Identity from environments.models import Environment from environments.permissions.constants import MANAGE_IDENTITIES from environments.permissions.permissions", "import Key from drf_yasg2.utils import swagger_auto_schema from flag_engine.api.schemas import APITraitSchema from flag_engine.identities.builders import", 
"initial(self, request, *args, **kwargs): environment = self.get_environment_from_request() if not environment.project.enable_dynamo_db: raise DynamoNotEnabledError() super().initial(request,", "@action(detail=True, methods=[\"get\"], url_path=\"list-traits\") def get_traits(self, request, *args, **kwargs): identity = self.get_object() data =", "and search_query.endswith('\"'): return self.dynamo_identifier_search_functions[ \"EQUAL\" ], search_query.replace('\"', \"\") return self.dynamo_identifier_search_functions[\"BEGINS_WITH\"], search_query def get_object(self):", "ValidationError from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from app.pagination import EdgeIdentityPagination", "*args, **kwargs): environment = self.get_environment_from_request() if not environment.project.organisation.persist_trait_data: raise TraitPersistenceError() identity = build_identity_model(self.get_object())", "identity_documents def get_permissions(self): return [ IsAuthenticated(), NestedEnvironmentPermissions( action_permission_map={ \"retrieve\": MANAGE_IDENTITIES, \"get_traits\": MANAGE_IDENTITIES, \"update_traits\":", "return Response(data, status=status.HTTP_200_OK) class EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet): permission_classes = [IsAuthenticated, IdentityFeatureStatePermissions] lookup_field = \"featurestate_uuid\" serializer_class", "from rest_framework.response import Response from app.pagination import EdgeIdentityPagination from edge_api.identities.serializers import ( EdgeIdentityFeatureStateSerializer,", "\"get\", \"post\", \"put\", \"delete\", \"head\", \"options\", \"trace\", ] pagination_class = None def initial(self,", "EdgeIdentityViewSet(viewsets.ModelViewSet): serializer_class = EdgeIdentitySerializer pagination_class = EdgeIdentityPagination lookup_field = \"identity_uuid\" dynamo_identifier_search_functions = {", "url_path=\"list-traits\") def get_traits(self, request, *args, **kwargs): 
identity = self.get_object() data = trait_schema.dump(identity[\"identity_traits\"], many=True)", "environment.project.organisation.persist_trait_data: raise TraitPersistenceError() identity = build_identity_model(self.get_object()) try: trait = trait_schema.load(request.data) except marshmallow.ValidationError as", "lambda identifier: Key(\"identifier\").begins_with(identifier), } def initial(self, request, *args, **kwargs): environment = self.get_environment_from_request() if", "**kwargs): environment = self.get_environment_from_request() if not environment.project.enable_dynamo_db: raise DynamoNotEnabledError() super().initial(request, *args, **kwargs) def", "base64 import json import typing import marshmallow from boto3.dynamodb.conditions import Key from drf_yasg2.utils", "def _get_search_function_and_value( self, search_query: str, ) -> typing.Tuple[typing.Callable, str]: if search_query.startswith('\"') and search_query.endswith('\"'):", "\"EQUAL\" ], search_query.replace('\"', \"\") return self.dynamo_identifier_search_functions[\"BEGINS_WITH\"], search_query def get_object(self): return Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"identity_uuid\"] )", "= self.get_environment_from_request() if not environment.project.organisation.persist_trait_data: raise TraitPersistenceError() identity = build_identity_model(self.get_object()) try: trait =", "IdentityFeatureStatePermissions from projects.exceptions import DynamoNotEnabledError from .exceptions import TraitPersistenceError trait_schema = APITraitSchema() class", ") @action(detail=True, methods=[\"put\"], url_path=\"update-traits\") def update_traits(self, request, *args, **kwargs): environment = self.get_environment_from_request() if", "viewsets from rest_framework.decorators import action from rest_framework.exceptions import NotFound, ValidationError from rest_framework.permissions import", "update_traits(self, request, *args, **kwargs): environment = 
self.get_environment_from_request() if not environment.project.organisation.persist_trait_data: raise TraitPersistenceError() identity", "= json.loads(base64.b64decode(previous_last_evaluated_key)) if not search_query: return Identity.dynamo_wrapper.get_all_items( self.kwargs[\"environment_api_key\"], page_size, start_key ) search_func, search_identifier", "in request. \"\"\" return Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"]) def perform_destroy(self, instance): Identity.dynamo_wrapper.delete_item(instance[\"composite_key\"]) @swagger_auto_schema( responses={200: EdgeIdentityTraitsSerializer(many=True)}, )", "= next( filter( lambda fs: fs.featurestate_uuid == featurestate_uuid, self.identity.identity_features, ) ) except StopIteration:", "method=\"put\", request_body=EdgeIdentityTraitsSerializer, responses={200: EdgeIdentityTraitsSerializer()}, ) @action(detail=True, methods=[\"put\"], url_path=\"update-traits\") def update_traits(self, request, *args, **kwargs):", "trait_schema.load(request.data) except marshmallow.ValidationError as validation_error: raise ValidationError(validation_error) from validation_error identity.update_traits([trait]) Identity.dynamo_wrapper.put_item(build_identity_dict(identity)) data =", "search_query.replace('\"', \"\") return self.dynamo_identifier_search_functions[\"BEGINS_WITH\"], search_query def get_object(self): return Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"identity_uuid\"] ) def get_queryset(self):", "from boto3.dynamodb.conditions import Key from drf_yasg2.utils import swagger_auto_schema from flag_engine.api.schemas import APITraitSchema from", "drf_yasg2.utils import swagger_auto_schema from flag_engine.api.schemas import APITraitSchema from flag_engine.identities.builders import ( build_identity_dict, build_identity_model,", "import EdgeIdentityPagination from edge_api.identities.serializers import ( EdgeIdentityFeatureStateSerializer, EdgeIdentityFsQueryparamSerializer, 
EdgeIdentitySerializer, EdgeIdentityTraitsSerializer, ) from environments.identities.models", "previous_last_evaluated_key: start_key = json.loads(base64.b64decode(previous_last_evaluated_key)) if not search_query: return Identity.dynamo_wrapper.get_all_items( self.kwargs[\"environment_api_key\"], page_size, start_key )", "\"options\", \"trace\", ] pagination_class = None def initial(self, request, *args, **kwargs): super().initial(request, *args,", "self.identity.identity_features, ) ) except StopIteration: raise NotFound() return featurestate @swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer()) def list(self, request,", "from rest_framework.decorators import action from rest_framework.exceptions import NotFound, ValidationError from rest_framework.permissions import IsAuthenticated", "raise DynamoNotEnabledError() super().initial(request, *args, **kwargs) def _get_search_function_and_value( self, search_query: str, ) -> typing.Tuple[typing.Callable,", "next( filter( lambda fs: fs.featurestate_uuid == featurestate_uuid, self.identity.identity_features, ) ) except StopIteration: raise", "EdgeIdentityPagination lookup_field = \"identity_uuid\" dynamo_identifier_search_functions = { \"EQUAL\": lambda identifier: Key(\"identifier\").eq(identifier), \"BEGINS_WITH\": lambda", "self.get_environment_from_request() if not environment.project.enable_dynamo_db: raise DynamoNotEnabledError() super().initial(request, *args, **kwargs) def _get_search_function_and_value( self, search_query:", "validation_error identity.update_traits([trait]) Identity.dynamo_wrapper.put_item(build_identity_dict(identity)) data = trait_schema.dump(trait) return Response(data, status=status.HTTP_200_OK) class EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet): permission_classes =", "import action from rest_framework.exceptions import NotFound, ValidationError from rest_framework.permissions import IsAuthenticated from rest_framework.response", "def get_permissions(self): 
return [ IsAuthenticated(), NestedEnvironmentPermissions( action_permission_map={ \"retrieve\": MANAGE_IDENTITIES, \"get_traits\": MANAGE_IDENTITIES, \"update_traits\": MANAGE_IDENTITIES,", "except marshmallow.ValidationError as validation_error: raise ValidationError(validation_error) from validation_error identity.update_traits([trait]) Identity.dynamo_wrapper.put_item(build_identity_dict(identity)) data = trait_schema.dump(trait)", "= \"identity_uuid\" dynamo_identifier_search_functions = { \"EQUAL\": lambda identifier: Key(\"identifier\").eq(identifier), \"BEGINS_WITH\": lambda identifier: Key(\"identifier\").begins_with(identifier),", "if not environment.project.enable_dynamo_db: raise DynamoNotEnabledError() super().initial(request, *args, **kwargs) def _get_search_function_and_value( self, search_query: str,", "self.get_environment_from_request() if not environment.project.organisation.persist_trait_data: raise TraitPersistenceError() identity = build_identity_model(self.get_object()) try: trait = trait_schema.load(request.data)", "Response(data=data, status=status.HTTP_200_OK) @swagger_auto_schema( method=\"put\", request_body=EdgeIdentityTraitsSerializer, responses={200: EdgeIdentityTraitsSerializer()}, ) @action(detail=True, methods=[\"put\"], url_path=\"update-traits\") def update_traits(self,", "trait_schema.dump(identity[\"identity_traits\"], many=True) return Response(data=data, status=status.HTTP_200_OK) @swagger_auto_schema( method=\"put\", request_body=EdgeIdentityTraitsSerializer, responses={200: EdgeIdentityTraitsSerializer()}, ) @action(detail=True, methods=[\"put\"],", "return self.dynamo_identifier_search_functions[ \"EQUAL\" ], search_query.replace('\"', \"\") return self.dynamo_identifier_search_functions[\"BEGINS_WITH\"], search_query def get_object(self): return Identity.dynamo_wrapper.get_item_from_uuid_or_404(", ") search_func, search_identifier = self._get_search_function_and_value( search_query ) identity_documents = 
Identity.dynamo_wrapper.search_items_with_identifier( self.kwargs[\"environment_api_key\"], search_identifier, search_func,", "None if previous_last_evaluated_key: start_key = json.loads(base64.b64decode(previous_last_evaluated_key)) if not search_query: return Identity.dynamo_wrapper.get_all_items( self.kwargs[\"environment_api_key\"], page_size,", "except StopIteration: raise NotFound() return featurestate @swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer()) def list(self, request, *args, **kwargs): q_params_serializer", "APITraitSchema() class EdgeIdentityViewSet(viewsets.ModelViewSet): serializer_class = EdgeIdentitySerializer pagination_class = EdgeIdentityPagination lookup_field = \"identity_uuid\" dynamo_identifier_search_functions", "environments.permissions.constants import MANAGE_IDENTITIES from environments.permissions.permissions import NestedEnvironmentPermissions from features.permissions import IdentityFeatureStatePermissions from projects.exceptions", ") q_params_serializer.is_valid(raise_exception=True) identity_features = self.identity.identity_features feature = q_params_serializer.data.get(\"feature\") if feature: identity_features = filter(", "self.kwargs[\"environment_api_key\"], page_size, start_key ) search_func, search_identifier = self._get_search_function_and_value( search_query ) identity_documents = Identity.dynamo_wrapper.search_items_with_identifier(", "= None if previous_last_evaluated_key: start_key = json.loads(base64.b64decode(previous_last_evaluated_key)) if not search_query: return Identity.dynamo_wrapper.get_all_items( self.kwargs[\"environment_api_key\"],", "( build_identity_dict, build_identity_model, ) from rest_framework import status, viewsets from rest_framework.decorators import action", "EdgeIdentityFsQueryparamSerializer( data=self.request.query_params ) q_params_serializer.is_valid(raise_exception=True) identity_features = self.identity.identity_features feature = 
q_params_serializer.data.get(\"feature\") if feature: identity_features", "= None def initial(self, request, *args, **kwargs): super().initial(request, *args, **kwargs) identity_document = Identity.dynamo_wrapper.get_item_from_uuid_or_404(", "IsAuthenticated(), NestedEnvironmentPermissions( action_permission_map={ \"retrieve\": MANAGE_IDENTITIES, \"get_traits\": MANAGE_IDENTITIES, \"update_traits\": MANAGE_IDENTITIES, } ), ] def", "not environment.project.enable_dynamo_db: raise DynamoNotEnabledError() super().initial(request, *args, **kwargs) def _get_search_function_and_value( self, search_query: str, )", "page_size = self.pagination_class().get_page_size(self.request) previous_last_evaluated_key = self.request.GET.get(\"last_evaluated_key\") search_query = self.request.query_params.get(\"q\") start_key = None if", "get_permissions(self): return [ IsAuthenticated(), NestedEnvironmentPermissions( action_permission_map={ \"retrieve\": MANAGE_IDENTITIES, \"get_traits\": MANAGE_IDENTITIES, \"update_traits\": MANAGE_IDENTITIES, }", "from environments.models import Environment from environments.permissions.constants import MANAGE_IDENTITIES from environments.permissions.permissions import NestedEnvironmentPermissions from", "def initial(self, request, *args, **kwargs): environment = self.get_environment_from_request() if not environment.project.enable_dynamo_db: raise DynamoNotEnabledError()", "= EdgeIdentityFeatureStateSerializer # Patch is not supported http_method_names = [ \"get\", \"post\", \"put\",", "None def initial(self, request, *args, **kwargs): super().initial(request, *args, **kwargs) identity_document = Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"edge_identity_identity_uuid\"]", "fs.feature.id == feature, identity_features ) serializer = self.get_serializer(identity_features, many=True) return Response(data=serializer.data, status=status.HTTP_200_OK) def", "flag_engine.api.schemas import APITraitSchema from 
flag_engine.identities.builders import ( build_identity_dict, build_identity_model, ) from rest_framework import", "] pagination_class = None def initial(self, request, *args, **kwargs): super().initial(request, *args, **kwargs) identity_document", "pagination_class = None def initial(self, request, *args, **kwargs): super().initial(request, *args, **kwargs) identity_document =", "request, *args, **kwargs): identity = self.get_object() data = trait_schema.dump(identity[\"identity_traits\"], many=True) return Response(data=data, status=status.HTTP_200_OK)", "**kwargs): super().initial(request, *args, **kwargs) identity_document = Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"edge_identity_identity_uuid\"] ) self.identity = build_identity_model(identity_document) def", "self.kwargs[\"edge_identity_identity_uuid\"] ) self.identity = build_identity_model(identity_document) def get_object(self): featurestate_uuid = self.kwargs[\"featurestate_uuid\"] try: featurestate =", "identity = self.get_object() data = trait_schema.dump(identity[\"identity_traits\"], many=True) return Response(data=data, status=status.HTTP_200_OK) @swagger_auto_schema( method=\"put\", request_body=EdgeIdentityTraitsSerializer,", "from rest_framework.exceptions import NotFound, ValidationError from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response", "EdgeIdentityFsQueryparamSerializer, EdgeIdentitySerializer, EdgeIdentityTraitsSerializer, ) from environments.identities.models import Identity from environments.models import Environment from", "typing import marshmallow from boto3.dynamodb.conditions import Key from drf_yasg2.utils import swagger_auto_schema from flag_engine.api.schemas", "def update_traits(self, request, *args, **kwargs): environment = self.get_environment_from_request() if not environment.project.organisation.persist_trait_data: raise TraitPersistenceError()", "IsAuthenticated from rest_framework.response import 
Response from app.pagination import EdgeIdentityPagination from edge_api.identities.serializers import (", "], search_query.replace('\"', \"\") return self.dynamo_identifier_search_functions[\"BEGINS_WITH\"], search_query def get_object(self): return Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"identity_uuid\"] ) def", "trait = trait_schema.load(request.data) except marshmallow.ValidationError as validation_error: raise ValidationError(validation_error) from validation_error identity.update_traits([trait]) Identity.dynamo_wrapper.put_item(build_identity_dict(identity))", "= q_params_serializer.data.get(\"feature\") if feature: identity_features = filter( lambda fs: fs.feature.id == feature, identity_features", "identity_features ) serializer = self.get_serializer(identity_features, many=True) return Response(data=serializer.data, status=status.HTTP_200_OK) def perform_destroy(self, instance): self.identity.identity_features.remove(instance)", "DynamoNotEnabledError from .exceptions import TraitPersistenceError trait_schema = APITraitSchema() class EdgeIdentityViewSet(viewsets.ModelViewSet): serializer_class = EdgeIdentitySerializer", "self, search_query: str, ) -> typing.Tuple[typing.Callable, str]: if search_query.startswith('\"') and search_query.endswith('\"'): return self.dynamo_identifier_search_functions[", "\"get_traits\": MANAGE_IDENTITIES, \"update_traits\": MANAGE_IDENTITIES, } ), ] def get_environment_from_request(self): \"\"\" Get environment object", "methods=[\"get\"], url_path=\"list-traits\") def get_traits(self, request, *args, **kwargs): identity = self.get_object() data = trait_schema.dump(identity[\"identity_traits\"],", "boto3.dynamodb.conditions import Key from drf_yasg2.utils import swagger_auto_schema from flag_engine.api.schemas import APITraitSchema from flag_engine.identities.builders", "rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from app.pagination import 
EdgeIdentityPagination from edge_api.identities.serializers", "ValidationError(validation_error) from validation_error identity.update_traits([trait]) Identity.dynamo_wrapper.put_item(build_identity_dict(identity)) data = trait_schema.dump(trait) return Response(data, status=status.HTTP_200_OK) class EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet):", "EdgeIdentityFeatureStateSerializer # Patch is not supported http_method_names = [ \"get\", \"post\", \"put\", \"delete\",", "rest_framework.decorators import action from rest_framework.exceptions import NotFound, ValidationError from rest_framework.permissions import IsAuthenticated from", "dynamo_identifier_search_functions = { \"EQUAL\": lambda identifier: Key(\"identifier\").eq(identifier), \"BEGINS_WITH\": lambda identifier: Key(\"identifier\").begins_with(identifier), } def", "URL parameters in request. \"\"\" return Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"]) def perform_destroy(self, instance): Identity.dynamo_wrapper.delete_item(instance[\"composite_key\"]) @swagger_auto_schema( responses={200:", "= self.identity.identity_features feature = q_params_serializer.data.get(\"feature\") if feature: identity_features = filter( lambda fs: fs.feature.id", "if search_query.startswith('\"') and search_query.endswith('\"'): return self.dynamo_identifier_search_functions[ \"EQUAL\" ], search_query.replace('\"', \"\") return self.dynamo_identifier_search_functions[\"BEGINS_WITH\"], search_query", "\"retrieve\": MANAGE_IDENTITIES, \"get_traits\": MANAGE_IDENTITIES, \"update_traits\": MANAGE_IDENTITIES, } ), ] def get_environment_from_request(self): \"\"\" Get", "identifier: Key(\"identifier\").begins_with(identifier), } def initial(self, request, *args, **kwargs): environment = self.get_environment_from_request() if not", "raise ValidationError(validation_error) from validation_error identity.update_traits([trait]) Identity.dynamo_wrapper.put_item(build_identity_dict(identity)) data = 
trait_schema.dump(trait) return Response(data, status=status.HTTP_200_OK) class", "\"EQUAL\": lambda identifier: Key(\"identifier\").eq(identifier), \"BEGINS_WITH\": lambda identifier: Key(\"identifier\").begins_with(identifier), } def initial(self, request, *args,", "not environment.project.organisation.persist_trait_data: raise TraitPersistenceError() identity = build_identity_model(self.get_object()) try: trait = trait_schema.load(request.data) except marshmallow.ValidationError", "*args, **kwargs): super().initial(request, *args, **kwargs) identity_document = Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"edge_identity_identity_uuid\"] ) self.identity = build_identity_model(identity_document)", "try: featurestate = next( filter( lambda fs: fs.featurestate_uuid == featurestate_uuid, self.identity.identity_features, ) )", "EdgeIdentityTraitsSerializer, ) from environments.identities.models import Identity from environments.models import Environment from environments.permissions.constants import", "typing.Tuple[typing.Callable, str]: if search_query.startswith('\"') and search_query.endswith('\"'): return self.dynamo_identifier_search_functions[ \"EQUAL\" ], search_query.replace('\"', \"\") return", "from app.pagination import EdgeIdentityPagination from edge_api.identities.serializers import ( EdgeIdentityFeatureStateSerializer, EdgeIdentityFsQueryparamSerializer, EdgeIdentitySerializer, EdgeIdentityTraitsSerializer, )", "*args, **kwargs): q_params_serializer = EdgeIdentityFsQueryparamSerializer( data=self.request.query_params ) q_params_serializer.is_valid(raise_exception=True) identity_features = self.identity.identity_features feature =", "from edge_api.identities.serializers import ( EdgeIdentityFeatureStateSerializer, EdgeIdentityFsQueryparamSerializer, EdgeIdentitySerializer, EdgeIdentityTraitsSerializer, ) from environments.identities.models import Identity", "StopIteration: raise NotFound() return featurestate 
@swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer()) def list(self, request, *args, **kwargs): q_params_serializer =", "raise NotFound() return featurestate @swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer()) def list(self, request, *args, **kwargs): q_params_serializer = EdgeIdentityFsQueryparamSerializer(", "Identity.dynamo_wrapper.put_item(build_identity_dict(identity)) data = trait_schema.dump(trait) return Response(data, status=status.HTTP_200_OK) class EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet): permission_classes = [IsAuthenticated, IdentityFeatureStatePermissions]", "search_query: str, ) -> typing.Tuple[typing.Callable, str]: if search_query.startswith('\"') and search_query.endswith('\"'): return self.dynamo_identifier_search_functions[ \"EQUAL\"", "EdgeIdentityFeatureStateSerializer, EdgeIdentityFsQueryparamSerializer, EdgeIdentitySerializer, EdgeIdentityTraitsSerializer, ) from environments.identities.models import Identity from environments.models import Environment", "lookup_field = \"featurestate_uuid\" serializer_class = EdgeIdentityFeatureStateSerializer # Patch is not supported http_method_names =", "fs: fs.feature.id == feature, identity_features ) serializer = self.get_serializer(identity_features, many=True) return Response(data=serializer.data, status=status.HTTP_200_OK)", "str]: if search_query.startswith('\"') and search_query.endswith('\"'): return self.dynamo_identifier_search_functions[ \"EQUAL\" ], search_query.replace('\"', \"\") return self.dynamo_identifier_search_functions[\"BEGINS_WITH\"],", "= Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"edge_identity_identity_uuid\"] ) self.identity = build_identity_model(identity_document) def get_object(self): featurestate_uuid = self.kwargs[\"featurestate_uuid\"] try:", "( EdgeIdentityFeatureStateSerializer, EdgeIdentityFsQueryparamSerializer, EdgeIdentitySerializer, EdgeIdentityTraitsSerializer, ) from 
environments.identities.models import Identity from environments.models import", "import TraitPersistenceError trait_schema = APITraitSchema() class EdgeIdentityViewSet(viewsets.ModelViewSet): serializer_class = EdgeIdentitySerializer pagination_class = EdgeIdentityPagination", "import DynamoNotEnabledError from .exceptions import TraitPersistenceError trait_schema = APITraitSchema() class EdgeIdentityViewSet(viewsets.ModelViewSet): serializer_class =", "= self.pagination_class().get_page_size(self.request) previous_last_evaluated_key = self.request.GET.get(\"last_evaluated_key\") search_query = self.request.query_params.get(\"q\") start_key = None if previous_last_evaluated_key:", "json import typing import marshmallow from boto3.dynamodb.conditions import Key from drf_yasg2.utils import swagger_auto_schema", "EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet): permission_classes = [IsAuthenticated, IdentityFeatureStatePermissions] lookup_field = \"featurestate_uuid\" serializer_class = EdgeIdentityFeatureStateSerializer # Patch", "search_identifier = self._get_search_function_and_value( search_query ) identity_documents = Identity.dynamo_wrapper.search_items_with_identifier( self.kwargs[\"environment_api_key\"], search_identifier, search_func, page_size, start_key,", "start_key = json.loads(base64.b64decode(previous_last_evaluated_key)) if not search_query: return Identity.dynamo_wrapper.get_all_items( self.kwargs[\"environment_api_key\"], page_size, start_key ) search_func,", "fs.featurestate_uuid == featurestate_uuid, self.identity.identity_features, ) ) except StopIteration: raise NotFound() return featurestate @swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer())", "from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from app.pagination import EdgeIdentityPagination from", "\"trace\", ] pagination_class = None def initial(self, request, *args, **kwargs): super().initial(request, *args, 
**kwargs)", ") self.identity = build_identity_model(identity_document) def get_object(self): featurestate_uuid = self.kwargs[\"featurestate_uuid\"] try: featurestate = next(", "from flag_engine.identities.builders import ( build_identity_dict, build_identity_model, ) from rest_framework import status, viewsets from", "many=True) return Response(data=data, status=status.HTTP_200_OK) @swagger_auto_schema( method=\"put\", request_body=EdgeIdentityTraitsSerializer, responses={200: EdgeIdentityTraitsSerializer()}, ) @action(detail=True, methods=[\"put\"], url_path=\"update-traits\")", "self.dynamo_identifier_search_functions[ \"EQUAL\" ], search_query.replace('\"', \"\") return self.dynamo_identifier_search_functions[\"BEGINS_WITH\"], search_query def get_object(self): return Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"identity_uuid\"]", "super().initial(request, *args, **kwargs) identity_document = Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"edge_identity_identity_uuid\"] ) self.identity = build_identity_model(identity_document) def get_object(self):", "EdgeIdentityPagination from edge_api.identities.serializers import ( EdgeIdentityFeatureStateSerializer, EdgeIdentityFsQueryparamSerializer, EdgeIdentitySerializer, EdgeIdentityTraitsSerializer, ) from environments.identities.models import", "APITraitSchema from flag_engine.identities.builders import ( build_identity_dict, build_identity_model, ) from rest_framework import status, viewsets", "featurestate = next( filter( lambda fs: fs.featurestate_uuid == featurestate_uuid, self.identity.identity_features, ) ) except", "features.permissions import IdentityFeatureStatePermissions from projects.exceptions import DynamoNotEnabledError from .exceptions import TraitPersistenceError trait_schema =", "TraitPersistenceError trait_schema = APITraitSchema() class EdgeIdentityViewSet(viewsets.ModelViewSet): serializer_class = EdgeIdentitySerializer pagination_class = EdgeIdentityPagination 
lookup_field", "start_key, ) return identity_documents def get_permissions(self): return [ IsAuthenticated(), NestedEnvironmentPermissions( action_permission_map={ \"retrieve\": MANAGE_IDENTITIES,", "environment object from URL parameters in request. \"\"\" return Environment.objects.get(api_key=self.kwargs[\"environment_api_key\"]) def perform_destroy(self, instance):", "from validation_error identity.update_traits([trait]) Identity.dynamo_wrapper.put_item(build_identity_dict(identity)) data = trait_schema.dump(trait) return Response(data, status=status.HTTP_200_OK) class EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet): permission_classes", "page_size, start_key ) search_func, search_identifier = self._get_search_function_and_value( search_query ) identity_documents = Identity.dynamo_wrapper.search_items_with_identifier( self.kwargs[\"environment_api_key\"],", "Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"edge_identity_identity_uuid\"] ) self.identity = build_identity_model(identity_document) def get_object(self): featurestate_uuid = self.kwargs[\"featurestate_uuid\"] try: featurestate", ".exceptions import TraitPersistenceError trait_schema = APITraitSchema() class EdgeIdentityViewSet(viewsets.ModelViewSet): serializer_class = EdgeIdentitySerializer pagination_class =", "marshmallow.ValidationError as validation_error: raise ValidationError(validation_error) from validation_error identity.update_traits([trait]) Identity.dynamo_wrapper.put_item(build_identity_dict(identity)) data = trait_schema.dump(trait) return", "import swagger_auto_schema from flag_engine.api.schemas import APITraitSchema from flag_engine.identities.builders import ( build_identity_dict, build_identity_model, )", "environment.project.enable_dynamo_db: raise DynamoNotEnabledError() super().initial(request, *args, **kwargs) def _get_search_function_and_value( self, search_query: str, ) ->", "**kwargs): q_params_serializer = EdgeIdentityFsQueryparamSerializer( 
data=self.request.query_params ) q_params_serializer.is_valid(raise_exception=True) identity_features = self.identity.identity_features feature = q_params_serializer.data.get(\"feature\")", "str, ) -> typing.Tuple[typing.Callable, str]: if search_query.startswith('\"') and search_query.endswith('\"'): return self.dynamo_identifier_search_functions[ \"EQUAL\" ],", ") ) except StopIteration: raise NotFound() return featurestate @swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer()) def list(self, request, *args,", "import APITraitSchema from flag_engine.identities.builders import ( build_identity_dict, build_identity_model, ) from rest_framework import status,", "Response(data, status=status.HTTP_200_OK) class EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet): permission_classes = [IsAuthenticated, IdentityFeatureStatePermissions] lookup_field = \"featurestate_uuid\" serializer_class =", "environment = self.get_environment_from_request() if not environment.project.organisation.persist_trait_data: raise TraitPersistenceError() identity = build_identity_model(self.get_object()) try: trait", "identity_document = Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"edge_identity_identity_uuid\"] ) self.identity = build_identity_model(identity_document) def get_object(self): featurestate_uuid = self.kwargs[\"featurestate_uuid\"]", "_get_search_function_and_value( self, search_query: str, ) -> typing.Tuple[typing.Callable, str]: if search_query.startswith('\"') and search_query.endswith('\"'): return", "if not environment.project.organisation.persist_trait_data: raise TraitPersistenceError() identity = build_identity_model(self.get_object()) try: trait = trait_schema.load(request.data) except", "= EdgeIdentitySerializer pagination_class = EdgeIdentityPagination lookup_field = \"identity_uuid\" dynamo_identifier_search_functions = { \"EQUAL\": lambda", "= trait_schema.load(request.data) except marshmallow.ValidationError as 
validation_error: raise ValidationError(validation_error) from validation_error identity.update_traits([trait]) Identity.dynamo_wrapper.put_item(build_identity_dict(identity)) data", ") def get_queryset(self): page_size = self.pagination_class().get_page_size(self.request) previous_last_evaluated_key = self.request.GET.get(\"last_evaluated_key\") search_query = self.request.query_params.get(\"q\") start_key", "EdgeIdentitySerializer, EdgeIdentityTraitsSerializer, ) from environments.identities.models import Identity from environments.models import Environment from environments.permissions.constants", ") @action(detail=True, methods=[\"get\"], url_path=\"list-traits\") def get_traits(self, request, *args, **kwargs): identity = self.get_object() data", "q_params_serializer.is_valid(raise_exception=True) identity_features = self.identity.identity_features feature = q_params_serializer.data.get(\"feature\") if feature: identity_features = filter( lambda", "permission_classes = [IsAuthenticated, IdentityFeatureStatePermissions] lookup_field = \"featurestate_uuid\" serializer_class = EdgeIdentityFeatureStateSerializer # Patch is", "*args, **kwargs): identity = self.get_object() data = trait_schema.dump(identity[\"identity_traits\"], many=True) return Response(data=data, status=status.HTTP_200_OK) @swagger_auto_schema(", "= build_identity_model(identity_document) def get_object(self): featurestate_uuid = self.kwargs[\"featurestate_uuid\"] try: featurestate = next( filter( lambda", "fs: fs.featurestate_uuid == featurestate_uuid, self.identity.identity_features, ) ) except StopIteration: raise NotFound() return featurestate", "not search_query: return Identity.dynamo_wrapper.get_all_items( self.kwargs[\"environment_api_key\"], page_size, start_key ) search_func, search_identifier = self._get_search_function_and_value( search_query", "self.dynamo_identifier_search_functions[\"BEGINS_WITH\"], search_query def get_object(self): return 
Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"identity_uuid\"] ) def get_queryset(self): page_size = self.pagination_class().get_page_size(self.request)", "import Identity from environments.models import Environment from environments.permissions.constants import MANAGE_IDENTITIES from environments.permissions.permissions import", "from environments.permissions.permissions import NestedEnvironmentPermissions from features.permissions import IdentityFeatureStatePermissions from projects.exceptions import DynamoNotEnabledError from", "feature: identity_features = filter( lambda fs: fs.feature.id == feature, identity_features ) serializer =", "@swagger_auto_schema( method=\"put\", request_body=EdgeIdentityTraitsSerializer, responses={200: EdgeIdentityTraitsSerializer()}, ) @action(detail=True, methods=[\"put\"], url_path=\"update-traits\") def update_traits(self, request, *args,", "try: trait = trait_schema.load(request.data) except marshmallow.ValidationError as validation_error: raise ValidationError(validation_error) from validation_error identity.update_traits([trait])", "status=status.HTTP_200_OK) @swagger_auto_schema( method=\"put\", request_body=EdgeIdentityTraitsSerializer, responses={200: EdgeIdentityTraitsSerializer()}, ) @action(detail=True, methods=[\"put\"], url_path=\"update-traits\") def update_traits(self, request,", "# Patch is not supported http_method_names = [ \"get\", \"post\", \"put\", \"delete\", \"head\",", "import typing import marshmallow from boto3.dynamodb.conditions import Key from drf_yasg2.utils import swagger_auto_schema from", "self.identity = build_identity_model(identity_document) def get_object(self): featurestate_uuid = self.kwargs[\"featurestate_uuid\"] try: featurestate = next( filter(", "get_object(self): featurestate_uuid = self.kwargs[\"featurestate_uuid\"] try: featurestate = next( filter( lambda fs: fs.featurestate_uuid ==", "def get_object(self): featurestate_uuid = self.kwargs[\"featurestate_uuid\"] try: 
featurestate = next( filter( lambda fs: fs.featurestate_uuid", "as validation_error: raise ValidationError(validation_error) from validation_error identity.update_traits([trait]) Identity.dynamo_wrapper.put_item(build_identity_dict(identity)) data = trait_schema.dump(trait) return Response(data,", "class EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet): permission_classes = [IsAuthenticated, IdentityFeatureStatePermissions] lookup_field = \"featurestate_uuid\" serializer_class = EdgeIdentityFeatureStateSerializer #", "self._get_search_function_and_value( search_query ) identity_documents = Identity.dynamo_wrapper.search_items_with_identifier( self.kwargs[\"environment_api_key\"], search_identifier, search_func, page_size, start_key, ) return", "= self.kwargs[\"featurestate_uuid\"] try: featurestate = next( filter( lambda fs: fs.featurestate_uuid == featurestate_uuid, self.identity.identity_features,", "search_func, search_identifier = self._get_search_function_and_value( search_query ) identity_documents = Identity.dynamo_wrapper.search_items_with_identifier( self.kwargs[\"environment_api_key\"], search_identifier, search_func, page_size,", "*args, **kwargs) identity_document = Identity.dynamo_wrapper.get_item_from_uuid_or_404( self.kwargs[\"edge_identity_identity_uuid\"] ) self.identity = build_identity_model(identity_document) def get_object(self): featurestate_uuid", "search_query = self.request.query_params.get(\"q\") start_key = None if previous_last_evaluated_key: start_key = json.loads(base64.b64decode(previous_last_evaluated_key)) if not", "NestedEnvironmentPermissions( action_permission_map={ \"retrieve\": MANAGE_IDENTITIES, \"get_traits\": MANAGE_IDENTITIES, \"update_traits\": MANAGE_IDENTITIES, } ), ] def get_environment_from_request(self):", "\"featurestate_uuid\" serializer_class = EdgeIdentityFeatureStateSerializer # Patch is not supported http_method_names = [ \"get\",", "MANAGE_IDENTITIES, \"update_traits\": MANAGE_IDENTITIES, } ), ] def 
get_environment_from_request(self): \"\"\" Get environment object from", "Key(\"identifier\").eq(identifier), \"BEGINS_WITH\": lambda identifier: Key(\"identifier\").begins_with(identifier), } def initial(self, request, *args, **kwargs): environment =", "= EdgeIdentityFsQueryparamSerializer( data=self.request.query_params ) q_params_serializer.is_valid(raise_exception=True) identity_features = self.identity.identity_features feature = q_params_serializer.data.get(\"feature\") if feature:", "import json import typing import marshmallow from boto3.dynamodb.conditions import Key from drf_yasg2.utils import", "= [IsAuthenticated, IdentityFeatureStatePermissions] lookup_field = \"featurestate_uuid\" serializer_class = EdgeIdentityFeatureStateSerializer # Patch is not", "marshmallow from boto3.dynamodb.conditions import Key from drf_yasg2.utils import swagger_auto_schema from flag_engine.api.schemas import APITraitSchema", "super().initial(request, *args, **kwargs) def _get_search_function_and_value( self, search_query: str, ) -> typing.Tuple[typing.Callable, str]: if", "DynamoNotEnabledError() super().initial(request, *args, **kwargs) def _get_search_function_and_value( self, search_query: str, ) -> typing.Tuple[typing.Callable, str]:", "request, *args, **kwargs): environment = self.get_environment_from_request() if not environment.project.organisation.persist_trait_data: raise TraitPersistenceError() identity =", "= \"featurestate_uuid\" serializer_class = EdgeIdentityFeatureStateSerializer # Patch is not supported http_method_names = [", "MANAGE_IDENTITIES from environments.permissions.permissions import NestedEnvironmentPermissions from features.permissions import IdentityFeatureStatePermissions from projects.exceptions import DynamoNotEnabledError", "\"identity_uuid\" dynamo_identifier_search_functions = { \"EQUAL\": lambda identifier: Key(\"identifier\").eq(identifier), \"BEGINS_WITH\": lambda identifier: Key(\"identifier\").begins_with(identifier), }", "supported 
http_method_names = [ \"get\", \"post\", \"put\", \"delete\", \"head\", \"options\", \"trace\", ] pagination_class", "TraitPersistenceError() identity = build_identity_model(self.get_object()) try: trait = trait_schema.load(request.data) except marshmallow.ValidationError as validation_error: raise", "= trait_schema.dump(trait) return Response(data, status=status.HTTP_200_OK) class EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet): permission_classes = [IsAuthenticated, IdentityFeatureStatePermissions] lookup_field =", "build_identity_model(self.get_object()) try: trait = trait_schema.load(request.data) except marshmallow.ValidationError as validation_error: raise ValidationError(validation_error) from validation_error", "def get_queryset(self): page_size = self.pagination_class().get_page_size(self.request) previous_last_evaluated_key = self.request.GET.get(\"last_evaluated_key\") search_query = self.request.query_params.get(\"q\") start_key =", "status, viewsets from rest_framework.decorators import action from rest_framework.exceptions import NotFound, ValidationError from rest_framework.permissions", "import Response from app.pagination import EdgeIdentityPagination from edge_api.identities.serializers import ( EdgeIdentityFeatureStateSerializer, EdgeIdentityFsQueryparamSerializer, EdgeIdentitySerializer,", "return Response(data=data, status=status.HTTP_200_OK) @swagger_auto_schema( method=\"put\", request_body=EdgeIdentityTraitsSerializer, responses={200: EdgeIdentityTraitsSerializer()}, ) @action(detail=True, methods=[\"put\"], url_path=\"update-traits\") def", "flag_engine.identities.builders import ( build_identity_dict, build_identity_model, ) from rest_framework import status, viewsets from rest_framework.decorators", "Identity.dynamo_wrapper.get_all_items( self.kwargs[\"environment_api_key\"], page_size, start_key ) search_func, search_identifier = self._get_search_function_and_value( search_query ) identity_documents =" ]
[ "front of each dict key :param epoch: optional int :param stats: {str: tensor}", "of accumulating the stats \"\"\" return {} def reset(self, key: str = None):", "dict keys, e.g. \"train\" or \"test\" :return: dictionary of string keys with corresponding", "logits: network outputs :param targets: output targets :return: dictionary of string keys with", "if they are gathered from distributed training or from different batches \"\"\" return", "\"\"\" new_logits = [] for tensor in logits + [targets]: shape = tensor.shape", ":return: dictionary of string keys with corresponding results \"\"\" raise NotImplementedError class AbstractLogMetric(AbstractMetric):", "list(self.stats.keys()) for k in keys: self.stats[k].clear() def on_epoch_start(self, epoch: int, is_last=False): self.reset(key=None) self.is_active", "-> str: raise NotImplementedError @classmethod def _combine_tensors(cls, dict_key: str, tensors: [torch.Tensor]) -> torch.Tensor:", "tensors: [torch.Tensor]) -> torch.Tensor: \"\"\" how to combine tensors if they are gathered", "to_use = targets != ignore_target_index logits = [lg[to_use] for lg in logits] targets", "and loggers (e.g. tensorboard), all single results of _evaluate() are weighted averaged later,", "prediction instead \"\"\" # remove all occurrences where the target equals the ignore", "stats: {str: tensor} or {str: [tensor]} :return: usually empty dict if stats are", "self._to_dict(key, prefix, self.get_log_name(), self._compute_stats(save_dir, key, stats)) return {} def _compute_stats(self, save_dir: str, key:", "arguments to add to argparse when this class (or a child class) is", "[torch.Tensor], targets: torch.Tensor) -> {str: ResultValue}: \"\"\" :param net: evaluated network :param inputs:", "targets = targets[to_use] # prevent logits from predicting an ignored class if ignore_prediction_index", "[batch, classes, n0, n1, ...] 
tensors into [batch, classes] :param logits: network outputs", "\"\"\" with torch.no_grad(): cur = self._evaluate(net, inputs, logits, targets) cur = {k: v.unsqueeze()", "ResultValue}: \"\"\" :param net: evaluated network :param inputs: network inputs :param logits: network", "lg.min(axis=1).values lg[:, ignore_prediction_index] = min_ logits = new_logits return logits, targets def get_accumulated_stats(self,", "int): save_dir = '%s/epoch_%d/' % (save_dir, epoch) self._viz_stats(save_dir, key, prefix, stats) return self._to_dict(key,", "__init__(self, head_weights: list, **kwargs): super().__init__() self.head_weights = head_weights for k, v in kwargs.items():", "] def reset(self, key: str = None): \"\"\" reset tracked stats for a", "specific key \"\"\" return {} def eval_accumulated_stats(self, save_dir: str, key: str, prefix=\"\", epoch:", "super().__init__() self.head_weights = head_weights for k, v in kwargs.items(): self.__setattr__(k, v) def get_log_name(self)", "result of accumulating the stats \"\"\" if stats is None: stats = self.get_accumulated_stats(key)", "[Argument]: \"\"\" list arguments to add to argparse when this class (or a", "cur = self._evaluate(net, inputs, logits, targets) # add all values to current stat", "into [batch, classes] :param logits: network outputs :param targets: output targets \"\"\" new_logits", "the averaged statistics for a specific key \"\"\" return {} def eval_accumulated_stats(self, save_dir:", "defaultdict from uninas.data.abstract import AbstractDataSet from uninas.models.networks.abstract import AbstractNetwork from uninas.training.result import ResultValue", "net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str) -> {str: torch.Tensor}:", "= lg.min(axis=1).values lg[:, ignore_prediction_index] = min_ logits = new_logits return logits, targets def", "all single results of _evaluate() are weighted averaged later, by how the batch", "specific key \"\"\" return 
self.stats.get(key, {}) def eval_accumulated_stats(self, save_dir: str, key: str, prefix=\"\",", "k, v in stats.items()} if len(stats) > 0: if isinstance(epoch, int): save_dir =", "or \"test\" :return: dictionary of string keys with corresponding results \"\"\" with torch.no_grad():", "(prefix, key, name) return {'%s/%s' % (s, k): v for k, v in", "ignore_prediction_index] = min_ logits = new_logits return logits, targets def get_accumulated_stats(self, key: str)", "self.head_weights = head_weights for k, v in kwargs.items(): self.__setattr__(k, v) def get_log_name(self) ->", "targets \"\"\" new_logits = [] for tensor in logits + [targets]: shape =", "else: new_logits.append(tensor) return new_logits[:-1], new_logits[-1] @classmethod def _remove_onehot(cls, targets: torch.Tensor) -> torch.Tensor: \"\"\"", "the result of accumulating the stats \"\"\" return {} def reset(self, key: str", "str, dct: dict) -> dict: \"\"\" adds key and name to all dict", "= defaultdict(dict) self.each_epochs = each_epochs self.is_active = False def get_log_name(self) -> str: raise", "a specific key \"\"\" return {} def eval_accumulated_stats(self, save_dir: str, key: str, prefix=\"\",", "sum(tensors) @classmethod def args_to_add(cls, index=None) -> [Argument]: \"\"\" list arguments to add to", "first \"\"\" def __init__(self, head_weights: list, each_epochs=-1, **kwargs): super().__init__(head_weights, **kwargs) self.stats = defaultdict(dict)", "> 0: if isinstance(epoch, int): save_dir = '%s/epoch_%d/' % (save_dir, epoch) self._viz_stats(save_dir, key,", "k in keys: self.stats[k].clear() def on_epoch_start(self, epoch: int, is_last=False): self.reset(key=None) self.is_active = is_last", "\"\"\" reset tracked stats for a specific key, or all (if key ==", "keys with corresponding [scalar] tensors \"\"\" if not self.is_active: return {} with torch.no_grad():", "lg[:, ignore_prediction_index] = min_ logits = new_logits return logits, targets def get_accumulated_stats(self, key:", "logits: 
network outputs :param targets: output targets :param key: prefix for the dict", "= None): \"\"\" reset tracked stats for a specific key, or all (if", "string keys with corresponding results \"\"\" raise NotImplementedError def get_accumulated_stats(self, key: str) ->", "classes] :param logits: network outputs :param targets: output targets \"\"\" new_logits = []", "{str: torch.Tensor}: \"\"\" get the averaged statistics for a specific key \"\"\" return", "this metric :param data_set: data set that is evaluated on :param head_weights: how", "% (prefix, key, name) return {'%s/%s' % (s, k): v for k, v", "is evaluated on :param head_weights: how each head is weighted \"\"\" all_parsed =", "if the network predicts this index, choose the next most-likely prediction instead \"\"\"", "of string keys with corresponding results \"\"\" raise NotImplementedError def _evaluate(self, net: AbstractNetwork,", "@classmethod def args_to_add(cls, index=None) -> [Argument]: \"\"\" list arguments to add to argparse", "prefix: str, name: str, dct: dict) -> dict: \"\"\" adds key and name", "dictionary of string keys with corresponding results \"\"\" raise NotImplementedError class AbstractLogMetric(AbstractMetric): \"\"\"", "self.get_log_name(), self._compute_stats(save_dir, key, stats)) return {} def _compute_stats(self, save_dir: str, key: str, stats:", ":param prefix: string prefix added in front of each dict key :param epoch:", "for a specific key, or all (if key == None) \"\"\" pass def", "in stats.items()} if len(stats) > 0: if isinstance(epoch, int): save_dir = '%s/epoch_%d/' %", "_batchify_tensors(cls, logits: [torch.Tensor], targets: torch.Tensor) -> ([torch.Tensor], torch.Tensor): \"\"\" reshape all [batch, classes,", "str) -> {str: torch.Tensor}: \"\"\" :param net: evaluated network :param inputs: network inputs", "\"test\" :return: dictionary of string keys with corresponding results \"\"\" raise NotImplementedError def", "k, v in kwargs.items(): self.__setattr__(k, v) def 
get_log_name(self) -> str: raise NotImplementedError @classmethod", "torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str) -> {str: ResultValue}: \"\"\" :param net:", "help='visualize each n epochs, only last if <=0'), ] def reset(self, key: str", "where the target matches this index :param ignore_prediction_index: if the network predicts this", "-> dict: \"\"\" visualize/log this metric :param save_dir: if stats are visualized, where", "'%s/epoch_%d/' % (save_dir, epoch) self._viz_stats(save_dir, key, prefix, stats) return self._to_dict(key, prefix, self.get_log_name(), self._compute_stats(save_dir,", "if <=0'), ] def reset(self, key: str = None): \"\"\" reset tracked stats", "network outputs and some targets \"\"\" def __init__(self, head_weights: list, **kwargs): super().__init__() self.head_weights", "single result \"\"\" def get_log_name(self) -> str: raise NotImplementedError def evaluate(self, net: AbstractNetwork,", "self.stats[key][k] = v.value return {} def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor],", "else v for k, v in stats.items()} if len(stats) > 0: if isinstance(epoch,", "(supervised) network training, between network outputs and some targets \"\"\" def __init__(self, head_weights:", "{k: self._combine_tensors(k, v) if isinstance(v, list) else v for k, v in stats.items()}", "index, choose the next most-likely prediction instead \"\"\" # remove all occurrences where", "self.__setattr__(k, v) def get_log_name(self) -> str: raise NotImplementedError @classmethod def from_args(cls, args: Namespace,", "specific key, or all (if key == None) \"\"\" pass def on_epoch_start(self, epoch:", "save_dir = '%s/epoch_%d/' % (save_dir, epoch) self._viz_stats(save_dir, key, prefix, stats) return self._to_dict(key, prefix,", "n epochs, only last if <=0'), ] def reset(self, key: str = None):", "all (if key == None) \"\"\" keys = [key] if isinstance(key, str) else", "results \"\"\" raise NotImplementedError class 
AbstractLogMetric(AbstractMetric): \"\"\" A metric that is logged epoch-wise", "def __init__(self, head_weights: list, each_epochs=-1, **kwargs): super().__init__(head_weights, **kwargs) self.stats = defaultdict(dict) self.each_epochs =", "they are gathered from distributed training or from different batches \"\"\" return sum(tensors)", "dict: \"\"\" compute this metric \"\"\" return {} def _viz_stats(self, save_dir: str, key:", "or {str: [tensor]} :return: usually empty dict if stats are visualized, otherwise the", "ignore_target_index: remove all samples where the target matches this index :param ignore_prediction_index: if", "else: with torch.no_grad(): stats = {k: self._combine_tensors(k, v) if isinstance(v, list) else v", "[key] if isinstance(key, str) else list(self.stats.keys()) for k in keys: self.stats[k].clear() def on_epoch_start(self,", "= [lg[to_use] for lg in logits] targets = targets[to_use] # prevent logits from", "lg in new_logits: min_ = lg.min(axis=1).values lg[:, ignore_prediction_index] = min_ logits = new_logits", "for the dict keys, e.g. \"train\" or \"test\" :return: dictionary of string keys", "set that is evaluated on :param head_weights: how each head is weighted \"\"\"", "usually empty dict if stats are visualized, otherwise the result of accumulating the", "return {} def reset(self, key: str = None): \"\"\" reset tracked stats for", "stats are visualized, where to save them :param key: key to log :param", "to combine tensors if they are gathered from distributed training or from different", "v for k, v in stats.items()} if len(stats) > 0: if isinstance(epoch, int):", "loggers (e.g. 
tensorboard), all single results of _evaluate() are weighted averaged later, by", "the stats \"\"\" return {} def reset(self, key: str = None): \"\"\" reset", "the [batch, classes] shape :param targets: output targets, has the [batch] shape :param", "from distributed training or from different batches \"\"\" return sum(tensors) @classmethod def args_to_add(cls,", "str: raise NotImplementedError @classmethod def from_args(cls, args: Namespace, index: int, data_set: AbstractDataSet, head_weights:", "self.get_log_name(), cur) def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor) ->", "torch.no_grad(): cur = self._evaluate(net, inputs, logits, targets) # add all values to current", "prefix added in front of each dict key :param epoch: optional int :param", "@classmethod def _remove_onehot(cls, targets: torch.Tensor) -> torch.Tensor: \"\"\" remove one-hot encoding from a", "logits: network outputs, each has the [batch, classes] shape :param targets: output targets,", "remove all samples where the target matches this index :param ignore_prediction_index: if the", "is_last or ((self.each_epochs > 0) and ((epoch + 1) % self.each_epochs == 0))", "\"\"\" :param args: global arguments namespace :param index: index of this metric :param", "stats are visualized, otherwise the result of accumulating the stats \"\"\" return {}", "self.stats[key][k] = self._combine_tensors(k, [self.stats[key][k], v.value]) else: self.stats[key][k] = v.value return {} def _evaluate(self,", "shape = tensor.shape if len(shape) > 2: new_logits.append(tensor.transpose(0, 1).reshape(shape[1], -1).transpose(0, 1)) else: new_logits.append(tensor)", "for k, v in kwargs.items(): self.__setattr__(k, v) def get_log_name(self) -> str: raise NotImplementedError", "get_log_name(self) -> str: raise NotImplementedError @classmethod def from_args(cls, args: Namespace, index: int, data_set:", "targets: torch.Tensor) -> ([torch.Tensor], torch.Tensor): \"\"\" 
reshape all [batch, classes, n0, n1, ...]", "lg in logits] for lg in new_logits: min_ = lg.min(axis=1).values lg[:, ignore_prediction_index] =", "== None) \"\"\" keys = [key] if isinstance(key, str) else list(self.stats.keys()) for k", "if stats are visualized, where to save them :param key: key to log", "def __init__(self, head_weights: list, **kwargs): super().__init__() self.head_weights = head_weights for k, v in", "str, prefix: str, name: str, dct: dict) -> dict: \"\"\" adds key and", "def get_log_name(self) -> str: raise NotImplementedError def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits:", "in keys: self.stats[k].clear() def on_epoch_start(self, epoch: int, is_last=False): self.reset(key=None) self.is_active = is_last or", "reset(self, key: str = None): \"\"\" reset tracked stats for a specific key,", "if k in self.stats[key]: self.stats[key][k] = self._combine_tensors(k, [self.stats[key][k], v.value]) else: self.stats[key][k] = v.value", "tensors into [batch, classes] :param logits: network outputs :param targets: output targets \"\"\"", "with corresponding results \"\"\" raise NotImplementedError class AbstractLogMetric(AbstractMetric): \"\"\" A metric that is", "->\\ ([torch.Tensor], torch.Tensor): \"\"\" remove all occurrences where the target equals the ignore", "statistics for a specific key \"\"\" return self.stats.get(key, {}) def eval_accumulated_stats(self, save_dir: str,", "def on_epoch_start(self, epoch: int, is_last=False): self.reset(key=None) self.is_active = is_last or ((self.each_epochs > 0)", "targets: output targets, has the [batch] shape :param ignore_target_index: remove all samples where", "\"\", self.get_log_name(), cur) def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor)", "on_epoch_start(self, epoch: int, is_last=False): self.reset(key=None) self.is_active = is_last or ((self.each_epochs > 0) and", "otherwise the result of accumulating the stats \"\"\" 
if stats is None: stats", "result of accumulating the stats \"\"\" return {} def reset(self, key: str =", "return {} def _compute_stats(self, save_dir: str, key: str, stats: dict) -> dict: \"\"\"", "choose the next most-likely prediction instead \"\"\" # remove all occurrences where the", "this index :param ignore_prediction_index: if the network predicts this index, choose the next", "-> str: raise NotImplementedError @classmethod def from_args(cls, args: Namespace, index: int, data_set: AbstractDataSet,", "targets: torch.Tensor) -> torch.Tensor: \"\"\" remove one-hot encoding from a [batch, classes] tensor", "logits, targets def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}: \"\"\" get the averaged", "k, v in cur.items()} return self._to_dict(key, \"\", self.get_log_name(), cur) def _evaluate(self, net: AbstractNetwork,", "key to log :param prefix: string prefix added in front of each dict", "# add all values to current stat dict for k, v in cur.items():", "head is weighted \"\"\" all_parsed = cls._all_parsed_arguments(args, index=index) return cls(head_weights=head_weights, **all_parsed) @classmethod def", "-> 'AbstractMetric': \"\"\" :param args: global arguments namespace :param index: index of this", "targets: torch.Tensor, key: str) -> {str: ResultValue}: \"\"\" :param net: evaluated network :param", "epoch: optional int :param stats: {str: tensor} or {str: [tensor]} :return: usually empty", "str, key: str, prefix=\"\", epoch: int = None, stats: dict = None) ->", "+ [ Argument('each_epochs', default=-1, type=int, help='visualize each n epochs, only last if <=0'),", "int = None, stats: dict = None) -> dict: \"\"\" visualize/log this metric", "all values to current stat dict for k, v in cur.items(): if k", "[scalar] tensors \"\"\" if not self.is_active: return {} with torch.no_grad(): cur = self._evaluate(net,", "> 2: new_logits.append(tensor.transpose(0, 1).reshape(shape[1], -1).transpose(0, 1)) else: new_logits.append(tensor) return 
new_logits[:-1], new_logits[-1] @classmethod def", "torch.argmax(targets, dim=-1) return targets @classmethod def _ignore_with_index(cls, logits: [torch.Tensor], targets: torch.Tensor, ignore_target_index=-999, ignore_prediction_index=-999)", "the dict keys, e.g. \"train\" or \"test\" :return: dictionary of string keys with", "tensor.shape if len(shape) > 2: new_logits.append(tensor.transpose(0, 1).reshape(shape[1], -1).transpose(0, 1)) else: new_logits.append(tensor) return new_logits[:-1],", "in logits] targets = targets[to_use] # prevent logits from predicting an ignored class", "else: self.stats[key][k] = v.value return {} def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits:", "cls._all_parsed_arguments(args, index=index) return cls(head_weights=head_weights, **all_parsed) @classmethod def _to_dict(cls, key: str, prefix: str, name:", "is_last=False): self.reset(key=None) self.is_active = is_last or ((self.each_epochs > 0) and ((epoch + 1)", "= \"%s/%s\" % (key, name) if len(prefix) == 0 else \"%s/%s/%s\" % (prefix,", "where the target equals the ignore index, prevent logits from predicting an ignored", "cur.items(): if k in self.stats[key]: self.stats[key][k] = self._combine_tensors(k, [self.stats[key][k], v.value]) else: self.stats[key][k] =", "default=-1, type=int, help='visualize each n epochs, only last if <=0'), ] def reset(self,", "of accumulating the stats \"\"\" if stats is None: stats = self.get_accumulated_stats(key) else:", "ignored class if ignore_prediction_index >= 0: new_logits = [lg.clone().detach_() for lg in logits]", "args: global arguments namespace :param index: index of this metric :param data_set: data", "key == None) \"\"\" pass def on_epoch_start(self, epoch: int, is_last=False): pass def evaluate(self,", "get the averaged statistics for a specific key \"\"\" return self.stats.get(key, {}) def", "output stream and loggers (e.g. 
tensorboard), all single results of _evaluate() are weighted", "keys with corresponding results \"\"\" raise NotImplementedError def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor,", "prefix=\"\", epoch: int = None, stats: dict = None) -> dict: \"\"\" visualize/log", "dictionary of string keys with corresponding results \"\"\" raise NotImplementedError def _evaluate(self, net:", "network outputs :param targets: output targets \"\"\" new_logits = [] for tensor in", "**kwargs) self.stats = defaultdict(dict) self.each_epochs = each_epochs self.is_active = False def get_log_name(self) ->", "with corresponding [scalar] tensors \"\"\" if not self.is_active: return {} with torch.no_grad(): cur", "only last if <=0'), ] def reset(self, key: str = None): \"\"\" reset", "with corresponding results \"\"\" raise NotImplementedError class AbstractAccumulateMetric(AbstractMetric): \"\"\" A metric that accumulates", "index :param ignore_prediction_index: if the network predicts this index, choose the next most-likely", "import ArgsInterface, Namespace, Argument class AbstractMetric(ArgsInterface): \"\"\" Metrics during (supervised) network training, between", "visualized, where to save them :param key: key to log :param prefix: string", "torch.Tensor, key: str) -> {str: ResultValue}: \"\"\" :param net: evaluated network :param inputs:", "are weighted averaged later, by how the batch sizes of each single result", "averaged later, by how the batch sizes of each single result \"\"\" def", "@classmethod def _combine_tensors(cls, dict_key: str, tensors: [torch.Tensor]) -> torch.Tensor: \"\"\" how to combine", "-> torch.Tensor: \"\"\" how to combine tensors if they are gathered from distributed", "string keys with corresponding results \"\"\" raise NotImplementedError class AbstractLogMetric(AbstractMetric): \"\"\" A metric", "all samples where the target matches this index :param ignore_prediction_index: if the network", "_combine_tensors(cls, dict_key: str, tensors: 
[torch.Tensor]) -> torch.Tensor: \"\"\" how to combine tensors if", "-> [Argument]: \"\"\" list arguments to add to argparse when this class (or", "statistics for a specific key \"\"\" return {} def eval_accumulated_stats(self, save_dir: str, key:", "results \"\"\" with torch.no_grad(): cur = self._evaluate(net, inputs, logits, targets) cur = {k:", "keys, e.g. \"train\" or \"test\" :return: dictionary of string keys with corresponding results", "the target equals the ignore index if ignore_target_index >= 0: to_use = targets", "to current stat dict for k, v in cur.items(): if k in self.stats[key]:", "for lg in logits] for lg in new_logits: min_ = lg.min(axis=1).values lg[:, ignore_prediction_index]", "_evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor) -> {str: ResultValue}: \"\"\"", "targets: output targets :return: dictionary of string keys with corresponding results \"\"\" raise", "gathered from distributed training or from different batches \"\"\" return sum(tensors) @classmethod def", "is chosen \"\"\" return super().args_to_add(index) + [ Argument('each_epochs', default=-1, type=int, help='visualize each n", "epoch: int, is_last=False): pass def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets:", "metric that is logged epoch-wise to the output stream and loggers (e.g. 
tensorboard),", "equals the ignore index, prevent logits from predicting an ignored class :param logits:", "prevent logits from predicting an ignored class :param logits: network outputs, each has", "network outputs, each has the [batch, classes] shape :param targets: output targets, has", "logits] for lg in new_logits: min_ = lg.min(axis=1).values lg[:, ignore_prediction_index] = min_ logits", "of string keys with corresponding results \"\"\" with torch.no_grad(): cur = self._evaluate(net, inputs,", "for a specific key \"\"\" return self.stats.get(key, {}) def eval_accumulated_stats(self, save_dir: str, key:", "from a [batch, classes] tensor \"\"\" if len(targets.shape) == 2: return torch.argmax(targets, dim=-1)", "return {} def _viz_stats(self, save_dir: str, key: str, prefix: str, stats: dict): \"\"\"", "for k, v in cur.items()} return self._to_dict(key, \"\", self.get_log_name(), cur) def _evaluate(self, net:", "dictionary of string keys with corresponding [scalar] tensors \"\"\" if not self.is_active: return", "\"train\" or \"test\" :return: dictionary of string keys with corresponding results \"\"\" raise", "ResultValue from uninas.utils.args import ArgsInterface, Namespace, Argument class AbstractMetric(ArgsInterface): \"\"\" Metrics during (supervised)", "key: str) -> {str: ResultValue}: \"\"\" :param net: evaluated network :param inputs: network", "list, each_epochs=-1, **kwargs): super().__init__(head_weights, **kwargs) self.stats = defaultdict(dict) self.each_epochs = each_epochs self.is_active =", "key: str) -> {str: torch.Tensor}: \"\"\" :param net: evaluated network :param inputs: network", "collections import defaultdict from uninas.data.abstract import AbstractDataSet from uninas.models.networks.abstract import AbstractNetwork from uninas.training.result", "dct.items()} @classmethod def _batchify_tensors(cls, logits: [torch.Tensor], targets: torch.Tensor) -> ([torch.Tensor], torch.Tensor): \"\"\" reshape", "if not self.is_active: return {} with 
torch.no_grad(): cur = self._evaluate(net, inputs, logits, targets)", "with torch.no_grad(): stats = {k: self._combine_tensors(k, v) if isinstance(v, list) else v for", "self._viz_stats(save_dir, key, prefix, stats) return self._to_dict(key, prefix, self.get_log_name(), self._compute_stats(save_dir, key, stats)) return {}", "new_logits[-1] @classmethod def _remove_onehot(cls, targets: torch.Tensor) -> torch.Tensor: \"\"\" remove one-hot encoding from", "how to combine tensors if they are gathered from distributed training or from", "\"\"\" def __init__(self, head_weights: list, **kwargs): super().__init__() self.head_weights = head_weights for k, v", "to all dict entries \"\"\" s = \"%s/%s\" % (key, name) if len(prefix)", "\"\"\" return {} def reset(self, key: str = None): \"\"\" reset tracked stats", "raise NotImplementedError class AbstractAccumulateMetric(AbstractMetric): \"\"\" A metric that accumulates stats first \"\"\" def", "distributed training or from different batches \"\"\" return sum(tensors) @classmethod def args_to_add(cls, index=None)", "not self.is_active: return {} with torch.no_grad(): cur = self._evaluate(net, inputs, logits, targets) #", "a [batch, classes] tensor \"\"\" if len(targets.shape) == 2: return torch.argmax(targets, dim=-1) return", "[torch.Tensor], targets: torch.Tensor) -> ([torch.Tensor], torch.Tensor): \"\"\" reshape all [batch, classes, n0, n1,", "def _batchify_tensors(cls, logits: [torch.Tensor], targets: torch.Tensor) -> ([torch.Tensor], torch.Tensor): \"\"\" reshape all [batch,", "this class (or a child class) is chosen \"\"\" return super().args_to_add(index) + [", "prevent logits from predicting an ignored class if ignore_prediction_index >= 0: new_logits =", "None: stats = self.get_accumulated_stats(key) else: with torch.no_grad(): stats = {k: self._combine_tensors(k, v) if", "dim=-1) return targets @classmethod def _ignore_with_index(cls, logits: [torch.Tensor], targets: torch.Tensor, ignore_target_index=-999, 
ignore_prediction_index=-999) ->\\", "if isinstance(key, str) else list(self.stats.keys()) for k in keys: self.stats[k].clear() def on_epoch_start(self, epoch:", "arguments namespace :param index: index of this metric :param data_set: data set that", "string keys with corresponding results \"\"\" raise NotImplementedError def _evaluate(self, net: AbstractNetwork, inputs:", "isinstance(epoch, int): save_dir = '%s/epoch_%d/' % (save_dir, epoch) self._viz_stats(save_dir, key, prefix, stats) return", "AbstractNetwork from uninas.training.result import ResultValue from uninas.utils.args import ArgsInterface, Namespace, Argument class AbstractMetric(ArgsInterface):", "from predicting an ignored class :param logits: network outputs, each has the [batch,", "this metric :param save_dir: if stats are visualized, where to save them :param", "torch.Tensor, key: str) -> {str: torch.Tensor}: \"\"\" :param net: evaluated network :param inputs:", "samples where the target matches this index :param ignore_prediction_index: if the network predicts", "remove all occurrences where the target equals the ignore index if ignore_target_index >=", "(key, name) if len(prefix) == 0 else \"%s/%s/%s\" % (prefix, key, name) return", "with torch.no_grad(): cur = self._evaluate(net, inputs, logits, targets) cur = {k: v.unsqueeze() for", "corresponding results \"\"\" raise NotImplementedError def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}: \"\"\"", "each n epochs, only last if <=0'), ] def reset(self, key: str =", "dict) -> dict: \"\"\" compute this metric \"\"\" return {} def _viz_stats(self, save_dir:", "def _to_dict(cls, key: str, prefix: str, name: str, dct: dict) -> dict: \"\"\"", "raise NotImplementedError @classmethod def _combine_tensors(cls, dict_key: str, tensors: [torch.Tensor]) -> torch.Tensor: \"\"\" how", "(or a child class) is chosen \"\"\" return super().args_to_add(index) + [ Argument('each_epochs', default=-1,", "A metric that accumulates stats first \"\"\" def 
__init__(self, head_weights: list, each_epochs=-1, **kwargs):", "the stats \"\"\" if stats is None: stats = self.get_accumulated_stats(key) else: with torch.no_grad():", "torch.Tensor): \"\"\" remove all occurrences where the target equals the ignore index, prevent", "self.is_active = False def get_log_name(self) -> str: raise NotImplementedError @classmethod def _combine_tensors(cls, dict_key:", "output targets :return: dictionary of string keys with corresponding results \"\"\" raise NotImplementedError", "= self._combine_tensors(k, [self.stats[key][k], v.value]) else: self.stats[key][k] = v.value return {} def _evaluate(self, net:", "return {'%s/%s' % (s, k): v for k, v in dct.items()} @classmethod def", "if len(stats) > 0: if isinstance(epoch, int): save_dir = '%s/epoch_%d/' % (save_dir, epoch)", ":param targets: output targets \"\"\" new_logits = [] for tensor in logits +", "ignore index, prevent logits from predicting an ignored class :param logits: network outputs,", "-> str: raise NotImplementedError def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets:", "stat dict for k, v in cur.items(): if k in self.stats[key]: self.stats[key][k] =", "self.stats.get(key, {}) def eval_accumulated_stats(self, save_dir: str, key: str, prefix=\"\", epoch: int = None,", "return torch.argmax(targets, dim=-1) return targets @classmethod def _ignore_with_index(cls, logits: [torch.Tensor], targets: torch.Tensor, ignore_target_index=-999,", "list arguments to add to argparse when this class (or a child class)", "= targets[to_use] # prevent logits from predicting an ignored class if ignore_prediction_index >=", "outputs :param targets: output targets :return: dictionary of string keys with corresponding results", "ignored class :param logits: network outputs, each has the [batch, classes] shape :param", "outputs :param targets: output targets :param key: prefix for the dict keys, e.g.", "how the batch sizes of each single result \"\"\" def 
get_log_name(self) -> str:", "torch.Tensor) -> torch.Tensor: \"\"\" remove one-hot encoding from a [batch, classes] tensor \"\"\"", "\"\"\" raise NotImplementedError def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}: \"\"\" get the", "inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str) -> {str: ResultValue}: \"\"\" :param", "class AbstractAccumulateMetric(AbstractMetric): \"\"\" A metric that accumulates stats first \"\"\" def __init__(self, head_weights:", "v in kwargs.items(): self.__setattr__(k, v) def get_log_name(self) -> str: raise NotImplementedError @classmethod def", "logits: [torch.Tensor], targets: torch.Tensor) -> {str: ResultValue}: \"\"\" :param net: evaluated network :param", "in dct.items()} @classmethod def _batchify_tensors(cls, logits: [torch.Tensor], targets: torch.Tensor) -> ([torch.Tensor], torch.Tensor): \"\"\"", "save_dir: str, key: str, stats: dict) -> dict: \"\"\" compute this metric \"\"\"", "network outputs :param targets: output targets :param key: prefix for the dict keys,", "dict_key: str, tensors: [torch.Tensor]) -> torch.Tensor: \"\"\" how to combine tensors if they", "return sum(tensors) @classmethod def args_to_add(cls, index=None) -> [Argument]: \"\"\" list arguments to add", "name) if len(prefix) == 0 else \"%s/%s/%s\" % (prefix, key, name) return {'%s/%s'", "((epoch + 1) % self.each_epochs == 0)) def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor,", ">= 0: to_use = targets != ignore_target_index logits = [lg[to_use] for lg in", "ignore_target_index=-999, ignore_prediction_index=-999) ->\\ ([torch.Tensor], torch.Tensor): \"\"\" remove all occurrences where the target equals", "string keys with corresponding results \"\"\" with torch.no_grad(): cur = self._evaluate(net, inputs, logits,", "\"\"\" return {} def eval_accumulated_stats(self, save_dir: str, key: str, prefix=\"\", epoch: int =", ":param logits: network outputs :param targets: output targets :return: dictionary of 
string keys", "pass def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str)", "with corresponding results \"\"\" with torch.no_grad(): cur = self._evaluate(net, inputs, logits, targets) cur", "\"test\" :return: dictionary of string keys with corresponding [scalar] tensors \"\"\" if not", "dict: \"\"\" visualize/log this metric :param save_dir: if stats are visualized, where to", "each_epochs self.is_active = False def get_log_name(self) -> str: raise NotImplementedError @classmethod def _combine_tensors(cls,", "add to argparse when this class (or a child class) is chosen \"\"\"", "if len(prefix) == 0 else \"%s/%s/%s\" % (prefix, key, name) return {'%s/%s' %", "self._combine_tensors(k, v) if isinstance(v, list) else v for k, v in stats.items()} if", "AbstractMetric(ArgsInterface): \"\"\" Metrics during (supervised) network training, between network outputs and some targets", "cls(head_weights=head_weights, **all_parsed) @classmethod def _to_dict(cls, key: str, prefix: str, name: str, dct: dict)", "save them :param key: key to log :param prefix: string prefix added in", "inputs, logits, targets) cur = {k: v.unsqueeze() for k, v in cur.items()} return", "data_set: data set that is evaluated on :param head_weights: how each head is", "entries \"\"\" s = \"%s/%s\" % (key, name) if len(prefix) == 0 else", "def get_log_name(self) -> str: raise NotImplementedError @classmethod def _combine_tensors(cls, dict_key: str, tensors: [torch.Tensor])", "keys = [key] if isinstance(key, str) else list(self.stats.keys()) for k in keys: self.stats[k].clear()", "key, name) return {'%s/%s' % (s, k): v for k, v in dct.items()}", "self.reset(key=None) self.is_active = is_last or ((self.each_epochs > 0) and ((epoch + 1) %", "None) -> dict: \"\"\" visualize/log this metric :param save_dir: if stats are visualized,", "super().__init__(head_weights, **kwargs) self.stats = defaultdict(dict) self.each_epochs = each_epochs 
self.is_active = False def get_log_name(self)", "from predicting an ignored class if ignore_prediction_index >= 0: new_logits = [lg.clone().detach_() for", "None, stats: dict = None) -> dict: \"\"\" visualize/log this metric :param save_dir:", "key: str = None): \"\"\" reset tracked stats for a specific key, or", "\"train\" or \"test\" :return: dictionary of string keys with corresponding [scalar] tensors \"\"\"", "if stats is None: stats = self.get_accumulated_stats(key) else: with torch.no_grad(): stats = {k:", "= '%s/epoch_%d/' % (save_dir, epoch) self._viz_stats(save_dir, key, prefix, stats) return self._to_dict(key, prefix, self.get_log_name(),", "are visualized, otherwise the result of accumulating the stats \"\"\" if stats is", "index: int, data_set: AbstractDataSet, head_weights: list) -> 'AbstractMetric': \"\"\" :param args: global arguments", "return cls(head_weights=head_weights, **all_parsed) @classmethod def _to_dict(cls, key: str, prefix: str, name: str, dct:", "str) -> {str: ResultValue}: \"\"\" :param net: evaluated network :param inputs: network inputs", "epoch) self._viz_stats(save_dir, key, prefix, stats) return self._to_dict(key, prefix, self.get_log_name(), self._compute_stats(save_dir, key, stats)) return", "them :param key: key to log :param prefix: string prefix added in front", "{} def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor) -> {str:", "result \"\"\" def get_log_name(self) -> str: raise NotImplementedError def evaluate(self, net: AbstractNetwork, inputs:", "for tensor in logits + [targets]: shape = tensor.shape if len(shape) > 2:", "v.unsqueeze() for k, v in cur.items()} return self._to_dict(key, \"\", self.get_log_name(), cur) def _evaluate(self,", "n0, n1, ...] 
tensors into [batch, classes] :param logits: network outputs :param targets:", "[] for tensor in logits + [targets]: shape = tensor.shape if len(shape) >", "different batches \"\"\" return sum(tensors) @classmethod def args_to_add(cls, index=None) -> [Argument]: \"\"\" list", "key, stats)) return {} def _compute_stats(self, save_dir: str, key: str, stats: dict) ->", "(e.g. tensorboard), all single results of _evaluate() are weighted averaged later, by how", "for a specific key, or all (if key == None) \"\"\" keys =", "% self.each_epochs == 0)) def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets:", "targets :return: dictionary of string keys with corresponding results \"\"\" raise NotImplementedError def", "_to_dict(cls, key: str, prefix: str, name: str, dct: dict) -> dict: \"\"\" adds", "for k in keys: self.stats[k].clear() def on_epoch_start(self, epoch: int, is_last=False): self.reset(key=None) self.is_active =", "training, between network outputs and some targets \"\"\" def __init__(self, head_weights: list, **kwargs):", "\"\"\" s = \"%s/%s\" % (key, name) if len(prefix) == 0 else \"%s/%s/%s\"", "-> torch.Tensor: \"\"\" remove one-hot encoding from a [batch, classes] tensor \"\"\" if", "targets, has the [batch] shape :param ignore_target_index: remove all samples where the target", ":return: dictionary of string keys with corresponding results \"\"\" raise NotImplementedError def get_accumulated_stats(self,", "import AbstractDataSet from uninas.models.networks.abstract import AbstractNetwork from uninas.training.result import ResultValue from uninas.utils.args import", "v) if isinstance(v, list) else v for k, v in stats.items()} if len(stats)", "of string keys with corresponding results \"\"\" raise NotImplementedError class AbstractAccumulateMetric(AbstractMetric): \"\"\" A", "last if <=0'), ] def reset(self, key: str = None): \"\"\" reset tracked", "import torch from collections import defaultdict from 
uninas.data.abstract import AbstractDataSet from uninas.models.networks.abstract import", "else \"%s/%s/%s\" % (prefix, key, name) return {'%s/%s' % (s, k): v for", "'AbstractMetric': \"\"\" :param args: global arguments namespace :param index: index of this metric", "from uninas.models.networks.abstract import AbstractNetwork from uninas.training.result import ResultValue from uninas.utils.args import ArgsInterface, Namespace,", "new_logits = [] for tensor in logits + [targets]: shape = tensor.shape if", "logits] targets = targets[to_use] # prevent logits from predicting an ignored class if", "([torch.Tensor], torch.Tensor): \"\"\" reshape all [batch, classes, n0, n1, ...] tensors into [batch,", "self._to_dict(key, \"\", self.get_log_name(), cur) def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets:", "NotImplementedError def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}: \"\"\" get the averaged statistics", "targets != ignore_target_index logits = [lg[to_use] for lg in logits] targets = targets[to_use]", "int :param stats: {str: tensor} or {str: [tensor]} :return: usually empty dict if", "net: evaluated network :param inputs: network inputs :param logits: network outputs :param targets:", "key :param epoch: optional int :param stats: {str: tensor} or {str: [tensor]} :return:", "None) \"\"\" pass def on_epoch_start(self, epoch: int, is_last=False): pass def evaluate(self, net: AbstractNetwork,", "= [] for tensor in logits + [targets]: shape = tensor.shape if len(shape)", "instead \"\"\" # remove all occurrences where the target equals the ignore index", "inputs: network inputs :param logits: network outputs :param targets: output targets :return: dictionary", "**kwargs): super().__init__() self.head_weights = head_weights for k, v in kwargs.items(): self.__setattr__(k, v) def", "NotImplementedError class AbstractAccumulateMetric(AbstractMetric): \"\"\" A metric that accumulates stats first \"\"\" def 
__init__(self,", "def _compute_stats(self, save_dir: str, key: str, stats: dict) -> dict: \"\"\" compute this", "each dict key :param epoch: optional int :param stats: {str: tensor} or {str:", "tracked stats for a specific key, or all (if key == None) \"\"\"", "= tensor.shape if len(shape) > 2: new_logits.append(tensor.transpose(0, 1).reshape(shape[1], -1).transpose(0, 1)) else: new_logits.append(tensor) return", "of this metric :param data_set: data set that is evaluated on :param head_weights:", "stats.items()} if len(stats) > 0: if isinstance(epoch, int): save_dir = '%s/epoch_%d/' % (save_dir,", "the target matches this index :param ignore_prediction_index: if the network predicts this index,", "index=None) -> [Argument]: \"\"\" list arguments to add to argparse when this class", "self._evaluate(net, inputs, logits, targets) # add all values to current stat dict for", "A metric that is logged epoch-wise to the output stream and loggers (e.g.", "new_logits.append(tensor) return new_logits[:-1], new_logits[-1] @classmethod def _remove_onehot(cls, targets: torch.Tensor) -> torch.Tensor: \"\"\" remove", "in kwargs.items(): self.__setattr__(k, v) def get_log_name(self) -> str: raise NotImplementedError @classmethod def from_args(cls,", "logits: [torch.Tensor], targets: torch.Tensor, key: str) -> {str: torch.Tensor}: \"\"\" :param net: evaluated", "return {} def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor) ->", "pass def on_epoch_start(self, epoch: int, is_last=False): pass def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor,", "\"\"\" return self.stats.get(key, {}) def eval_accumulated_stats(self, save_dir: str, key: str, prefix=\"\", epoch: int", "str, stats: dict) -> dict: \"\"\" compute this metric \"\"\" return {} def", "{} def eval_accumulated_stats(self, save_dir: str, key: str, prefix=\"\", epoch: int = None, stats:", "False def get_log_name(self) -> str: raise NotImplementedError 
@classmethod def _combine_tensors(cls, dict_key: str, tensors:", "key: str, prefix: str, name: str, dct: dict) -> dict: \"\"\" adds key", "2: return torch.argmax(targets, dim=-1) return targets @classmethod def _ignore_with_index(cls, logits: [torch.Tensor], targets: torch.Tensor,", "corresponding [scalar] tensors \"\"\" if not self.is_active: return {} with torch.no_grad(): cur =", "the target equals the ignore index, prevent logits from predicting an ignored class", "name) return {'%s/%s' % (s, k): v for k, v in dct.items()} @classmethod", "{str: [tensor]} :return: usually empty dict if stats are visualized, otherwise the result", "this metric \"\"\" return {} def _viz_stats(self, save_dir: str, key: str, prefix: str,", "logits: [torch.Tensor], targets: torch.Tensor) -> ([torch.Tensor], torch.Tensor): \"\"\" reshape all [batch, classes, n0,", "averaged statistics for a specific key \"\"\" return self.stats.get(key, {}) def eval_accumulated_stats(self, save_dir:", "that is logged epoch-wise to the output stream and loggers (e.g. 
tensorboard), all", "in logits + [targets]: shape = tensor.shape if len(shape) > 2: new_logits.append(tensor.transpose(0, 1).reshape(shape[1],", "\"\"\" # remove all occurrences where the target equals the ignore index if", "return self._to_dict(key, prefix, self.get_log_name(), self._compute_stats(save_dir, key, stats)) return {} def _compute_stats(self, save_dir: str,", "results \"\"\" raise NotImplementedError def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets:", "key: key to log :param prefix: string prefix added in front of each", "epoch: int = None, stats: dict = None) -> dict: \"\"\" visualize/log this", "a specific key, or all (if key == None) \"\"\" pass def on_epoch_start(self,", "occurrences where the target equals the ignore index if ignore_target_index >= 0: to_use", "raise NotImplementedError class AbstractLogMetric(AbstractMetric): \"\"\" A metric that is logged epoch-wise to the", "key: str, prefix=\"\", epoch: int = None, stats: dict = None) -> dict:", "e.g. 
\"train\" or \"test\" :return: dictionary of string keys with corresponding [scalar] tensors", "2: new_logits.append(tensor.transpose(0, 1).reshape(shape[1], -1).transpose(0, 1)) else: new_logits.append(tensor) return new_logits[:-1], new_logits[-1] @classmethod def _remove_onehot(cls,", "[batch, classes] tensor \"\"\" if len(targets.shape) == 2: return torch.argmax(targets, dim=-1) return targets", "most-likely prediction instead \"\"\" # remove all occurrences where the target equals the", "torch.Tensor}: \"\"\" get the averaged statistics for a specific key \"\"\" return {}", "net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor) -> {str: ResultValue}: \"\"\" :param", "= [key] if isinstance(key, str) else list(self.stats.keys()) for k in keys: self.stats[k].clear() def", "_compute_stats(self, save_dir: str, key: str, stats: dict) -> dict: \"\"\" compute this metric", "torch.Tensor, ignore_target_index=-999, ignore_prediction_index=-999) ->\\ ([torch.Tensor], torch.Tensor): \"\"\" remove all occurrences where the target", "= None) -> dict: \"\"\" visualize/log this metric :param save_dir: if stats are", "is logged epoch-wise to the output stream and loggers (e.g. 
tensorboard), all single", "all dict entries \"\"\" s = \"%s/%s\" % (key, name) if len(prefix) ==", "== 2: return torch.argmax(targets, dim=-1) return targets @classmethod def _ignore_with_index(cls, logits: [torch.Tensor], targets:", "def from_args(cls, args: Namespace, index: int, data_set: AbstractDataSet, head_weights: list) -> 'AbstractMetric': \"\"\"", "metric :param save_dir: if stats are visualized, where to save them :param key:", "self.get_accumulated_stats(key) else: with torch.no_grad(): stats = {k: self._combine_tensors(k, v) if isinstance(v, list) else", "= each_epochs self.is_active = False def get_log_name(self) -> str: raise NotImplementedError @classmethod def", "inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor) -> {str: ResultValue}: \"\"\" :param net: evaluated", "optional int :param stats: {str: tensor} or {str: [tensor]} :return: usually empty dict", "dict if stats are visualized, otherwise the result of accumulating the stats \"\"\"", "if stats are visualized, otherwise the result of accumulating the stats \"\"\" if", "stats: dict = None) -> dict: \"\"\" visualize/log this metric :param save_dir: if", "targets def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}: \"\"\" get the averaged statistics", "key \"\"\" return {} def eval_accumulated_stats(self, save_dir: str, key: str, prefix=\"\", epoch: int", "= new_logits return logits, targets def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}: \"\"\"", "are visualized, otherwise the result of accumulating the stats \"\"\" return {} def", "logits, targets) # add all values to current stat dict for k, v", "index: index of this metric :param data_set: data set that is evaluated on", "def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str) ->", "len(shape) > 2: new_logits.append(tensor.transpose(0, 1).reshape(shape[1], -1).transpose(0, 1)) else: new_logits.append(tensor) return 
new_logits[:-1], new_logits[-1] @classmethod", "empty dict if stats are visualized, otherwise the result of accumulating the stats", "or \"test\" :return: dictionary of string keys with corresponding results \"\"\" raise NotImplementedError", "training or from different batches \"\"\" return sum(tensors) @classmethod def args_to_add(cls, index=None) ->", "k): v for k, v in dct.items()} @classmethod def _batchify_tensors(cls, logits: [torch.Tensor], targets:", "if stats are visualized, otherwise the result of accumulating the stats \"\"\" return", "output targets, has the [batch] shape :param ignore_target_index: remove all samples where the", ":param inputs: network inputs :param logits: network outputs :param targets: output targets :param", "class :param logits: network outputs, each has the [batch, classes] shape :param targets:", "visualized, otherwise the result of accumulating the stats \"\"\" if stats is None:", "for k, v in stats.items()} if len(stats) > 0: if isinstance(epoch, int): save_dir", "for k, v in dct.items()} @classmethod def _batchify_tensors(cls, logits: [torch.Tensor], targets: torch.Tensor) ->", "accumulating the stats \"\"\" return {} def reset(self, key: str = None): \"\"\"", "(if key == None) \"\"\" pass def on_epoch_start(self, epoch: int, is_last=False): pass def", "cur = self._evaluate(net, inputs, logits, targets) cur = {k: v.unsqueeze() for k, v", "\"\"\" return super().args_to_add(index) + [ Argument('each_epochs', default=-1, type=int, help='visualize each n epochs, only", "each has the [batch, classes] shape :param targets: output targets, has the [batch]", "n1, ...] 
tensors into [batch, classes] :param logits: network outputs :param targets: output", "ignore_prediction_index >= 0: new_logits = [lg.clone().detach_() for lg in logits] for lg in", "NotImplementedError def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor) -> {str:", "AbstractDataSet from uninas.models.networks.abstract import AbstractNetwork from uninas.training.result import ResultValue from uninas.utils.args import ArgsInterface,", "k, v in dct.items()} @classmethod def _batchify_tensors(cls, logits: [torch.Tensor], targets: torch.Tensor) -> ([torch.Tensor],", "(if key == None) \"\"\" keys = [key] if isinstance(key, str) else list(self.stats.keys())", "stats: dict) -> dict: \"\"\" compute this metric \"\"\" return {} def _viz_stats(self,", "k, v in cur.items(): if k in self.stats[key]: self.stats[key][k] = self._combine_tensors(k, [self.stats[key][k], v.value])", "averaged statistics for a specific key \"\"\" return {} def eval_accumulated_stats(self, save_dir: str,", "list, **kwargs): super().__init__() self.head_weights = head_weights for k, v in kwargs.items(): self.__setattr__(k, v)", ":return: dictionary of string keys with corresponding results \"\"\" with torch.no_grad(): cur =", "{}) def eval_accumulated_stats(self, save_dir: str, key: str, prefix=\"\", epoch: int = None, stats:", "predicting an ignored class :param logits: network outputs, each has the [batch, classes]", "ignore_prediction_index: if the network predicts this index, choose the next most-likely prediction instead", "logits = [lg[to_use] for lg in logits] targets = targets[to_use] # prevent logits", "stats \"\"\" return {} def reset(self, key: str = None): \"\"\" reset tracked", "str: raise NotImplementedError @classmethod def _combine_tensors(cls, dict_key: str, tensors: [torch.Tensor]) -> torch.Tensor: \"\"\"", "index, prevent logits from predicting an ignored class :param logits: network outputs, each", "key, or all (if key == 
None) \"\"\" keys = [key] if isinstance(key,", "keys with corresponding results \"\"\" raise NotImplementedError class AbstractAccumulateMetric(AbstractMetric): \"\"\" A metric that", "[self.stats[key][k], v.value]) else: self.stats[key][k] = v.value return {} def _evaluate(self, net: AbstractNetwork, inputs:", "1).reshape(shape[1], -1).transpose(0, 1)) else: new_logits.append(tensor) return new_logits[:-1], new_logits[-1] @classmethod def _remove_onehot(cls, targets: torch.Tensor)", "get the averaged statistics for a specific key \"\"\" return {} def eval_accumulated_stats(self,", "logits: network outputs :param targets: output targets \"\"\" new_logits = [] for tensor", "torch.Tensor}: \"\"\" get the averaged statistics for a specific key \"\"\" return self.stats.get(key,", "\"\"\" if stats is None: stats = self.get_accumulated_stats(key) else: with torch.no_grad(): stats =", "Argument class AbstractMetric(ArgsInterface): \"\"\" Metrics during (supervised) network training, between network outputs and", "encoding from a [batch, classes] tensor \"\"\" if len(targets.shape) == 2: return torch.argmax(targets,", "key: str, stats: dict) -> dict: \"\"\" compute this metric \"\"\" return {}", "[batch, classes] :param logits: network outputs :param targets: output targets \"\"\" new_logits =", "index=index) return cls(head_weights=head_weights, **all_parsed) @classmethod def _to_dict(cls, key: str, prefix: str, name: str,", "one-hot encoding from a [batch, classes] tensor \"\"\" if len(targets.shape) == 2: return", "targets \"\"\" def __init__(self, head_weights: list, **kwargs): super().__init__() self.head_weights = head_weights for k,", ":param save_dir: if stats are visualized, where to save them :param key: key", "\"\"\" :param net: evaluated network :param inputs: network inputs :param logits: network outputs", "the batch sizes of each single result \"\"\" def get_log_name(self) -> str: raise", "logged epoch-wise to the output stream and loggers (e.g. 
tensorboard), all single results", "cur) def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor) -> {str:", "def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor) -> {str: ResultValue}:", "dict for k, v in cur.items(): if k in self.stats[key]: self.stats[key][k] = self._combine_tensors(k,", "\"\"\" list arguments to add to argparse when this class (or a child", "return super().args_to_add(index) + [ Argument('each_epochs', default=-1, type=int, help='visualize each n epochs, only last", "1) % self.each_epochs == 0)) def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor],", "def _remove_onehot(cls, targets: torch.Tensor) -> torch.Tensor: \"\"\" remove one-hot encoding from a [batch,", "during (supervised) network training, between network outputs and some targets \"\"\" def __init__(self,", "def _viz_stats(self, save_dir: str, key: str, prefix: str, stats: dict): \"\"\" visualize this", "outputs and some targets \"\"\" def __init__(self, head_weights: list, **kwargs): super().__init__() self.head_weights =", "def get_log_name(self) -> str: raise NotImplementedError @classmethod def from_args(cls, args: Namespace, index: int,", "class AbstractLogMetric(AbstractMetric): \"\"\" A metric that is logged epoch-wise to the output stream", "and some targets \"\"\" def __init__(self, head_weights: list, **kwargs): super().__init__() self.head_weights = head_weights", "weighted \"\"\" all_parsed = cls._all_parsed_arguments(args, index=index) return cls(head_weights=head_weights, **all_parsed) @classmethod def _to_dict(cls, key:", "weighted averaged later, by how the batch sizes of each single result \"\"\"", "= False def get_log_name(self) -> str: raise NotImplementedError @classmethod def _combine_tensors(cls, dict_key: str,", "in cur.items(): if k in self.stats[key]: self.stats[key][k] = self._combine_tensors(k, [self.stats[key][k], 
v.value]) else: self.stats[key][k]", "remove all occurrences where the target equals the ignore index, prevent logits from", "torch.Tensor) -> ([torch.Tensor], torch.Tensor): \"\"\" reshape all [batch, classes, n0, n1, ...] tensors", "key: prefix for the dict keys, e.g. \"train\" or \"test\" :return: dictionary of", "log :param prefix: string prefix added in front of each dict key :param", "visualize/log this metric :param save_dir: if stats are visualized, where to save them", "return logits, targets def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}: \"\"\" get the", "metric that accumulates stats first \"\"\" def __init__(self, head_weights: list, each_epochs=-1, **kwargs): super().__init__(head_weights,", "prefix for the dict keys, e.g. \"train\" or \"test\" :return: dictionary of string", "current stat dict for k, v in cur.items(): if k in self.stats[key]: self.stats[key][k]", "= v.value return {} def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets:", "# remove all occurrences where the target equals the ignore index if ignore_target_index", "with corresponding results \"\"\" raise NotImplementedError def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits:", "{'%s/%s' % (s, k): v for k, v in dct.items()} @classmethod def _batchify_tensors(cls,", "if ignore_target_index >= 0: to_use = targets != ignore_target_index logits = [lg[to_use] for", "__init__(self, head_weights: list, each_epochs=-1, **kwargs): super().__init__(head_weights, **kwargs) self.stats = defaultdict(dict) self.each_epochs = each_epochs", "chosen \"\"\" return super().args_to_add(index) + [ Argument('each_epochs', default=-1, type=int, help='visualize each n epochs,", "v in dct.items()} @classmethod def _batchify_tensors(cls, logits: [torch.Tensor], targets: torch.Tensor) -> ([torch.Tensor], torch.Tensor):", "[ Argument('each_epochs', default=-1, type=int, help='visualize each n epochs, only last if <=0'), ]", "if 
ignore_prediction_index >= 0: new_logits = [lg.clone().detach_() for lg in logits] for lg", ">= 0: new_logits = [lg.clone().detach_() for lg in logits] for lg in new_logits:", "def _combine_tensors(cls, dict_key: str, tensors: [torch.Tensor]) -> torch.Tensor: \"\"\" how to combine tensors", "\"\"\" remove one-hot encoding from a [batch, classes] tensor \"\"\" if len(targets.shape) ==", "= self._evaluate(net, inputs, logits, targets) cur = {k: v.unsqueeze() for k, v in", ":param targets: output targets :return: dictionary of string keys with corresponding results \"\"\"", "self.stats = defaultdict(dict) self.each_epochs = each_epochs self.is_active = False def get_log_name(self) -> str:", ":return: dictionary of string keys with corresponding results \"\"\" raise NotImplementedError def _evaluate(self,", "keys, e.g. \"train\" or \"test\" :return: dictionary of string keys with corresponding [scalar]", "% (save_dir, epoch) self._viz_stats(save_dir, key, prefix, stats) return self._to_dict(key, prefix, self.get_log_name(), self._compute_stats(save_dir, key,", "an ignored class :param logits: network outputs, each has the [batch, classes] shape", "target equals the ignore index if ignore_target_index >= 0: to_use = targets !=", "[torch.Tensor]) -> torch.Tensor: \"\"\" how to combine tensors if they are gathered from", ":param targets: output targets, has the [batch] shape :param ignore_target_index: remove all samples", "to log :param prefix: string prefix added in front of each dict key", "on_epoch_start(self, epoch: int, is_last=False): pass def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor],", "stats first \"\"\" def __init__(self, head_weights: list, each_epochs=-1, **kwargs): super().__init__(head_weights, **kwargs) self.stats =", "the network predicts this index, choose the next most-likely prediction instead \"\"\" #", "return targets @classmethod def _ignore_with_index(cls, logits: [torch.Tensor], targets: torch.Tensor, 
ignore_target_index=-999, ignore_prediction_index=-999) ->\\ ([torch.Tensor],", "predicts this index, choose the next most-likely prediction instead \"\"\" # remove all", "targets: torch.Tensor, ignore_target_index=-999, ignore_prediction_index=-999) ->\\ ([torch.Tensor], torch.Tensor): \"\"\" remove all occurrences where the", "the next most-likely prediction instead \"\"\" # remove all occurrences where the target", "name: str, dct: dict) -> dict: \"\"\" adds key and name to all", "how each head is weighted \"\"\" all_parsed = cls._all_parsed_arguments(args, index=index) return cls(head_weights=head_weights, **all_parsed)", "eval_accumulated_stats(self, save_dir: str, key: str, prefix=\"\", epoch: int = None, stats: dict =", "\"\"\" all_parsed = cls._all_parsed_arguments(args, index=index) return cls(head_weights=head_weights, **all_parsed) @classmethod def _to_dict(cls, key: str,", "is_last=False): pass def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key:", "targets) # add all values to current stat dict for k, v in", "a specific key, or all (if key == None) \"\"\" keys = [key]", "targets :param key: prefix for the dict keys, e.g. 
\"train\" or \"test\" :return:", "\"\"\" def __init__(self, head_weights: list, each_epochs=-1, **kwargs): super().__init__(head_weights, **kwargs) self.stats = defaultdict(dict) self.each_epochs", "global arguments namespace :param index: index of this metric :param data_set: data set", "of each single result \"\"\" def get_log_name(self) -> str: raise NotImplementedError def evaluate(self,", "from uninas.utils.args import ArgsInterface, Namespace, Argument class AbstractMetric(ArgsInterface): \"\"\" Metrics during (supervised) network", "head_weights for k, v in kwargs.items(): self.__setattr__(k, v) def get_log_name(self) -> str: raise", "evaluated on :param head_weights: how each head is weighted \"\"\" all_parsed = cls._all_parsed_arguments(args,", "corresponding results \"\"\" raise NotImplementedError def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor],", "with torch.no_grad(): cur = self._evaluate(net, inputs, logits, targets) # add all values to", "\"\"\" get the averaged statistics for a specific key \"\"\" return {} def", "\"%s/%s/%s\" % (prefix, key, name) return {'%s/%s' % (s, k): v for k,", "cur.items()} return self._to_dict(key, \"\", self.get_log_name(), cur) def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits:", "stats for a specific key, or all (if key == None) \"\"\" keys", "AbstractLogMetric(AbstractMetric): \"\"\" A metric that is logged epoch-wise to the output stream and", "[torch.Tensor], targets: torch.Tensor, ignore_target_index=-999, ignore_prediction_index=-999) ->\\ ([torch.Tensor], torch.Tensor): \"\"\" remove all occurrences where", "epoch: int, is_last=False): self.reset(key=None) self.is_active = is_last or ((self.each_epochs > 0) and ((epoch", "add all values to current stat dict for k, v in cur.items(): if", "name to all dict entries \"\"\" s = \"%s/%s\" % (key, name) if", "ignore_target_index logits = [lg[to_use] for lg in logits] targets = targets[to_use] # prevent", "in 
new_logits: min_ = lg.min(axis=1).values lg[:, ignore_prediction_index] = min_ logits = new_logits return", "v) def get_log_name(self) -> str: raise NotImplementedError @classmethod def from_args(cls, args: Namespace, index:", "# prevent logits from predicting an ignored class if ignore_prediction_index >= 0: new_logits", "all occurrences where the target equals the ignore index if ignore_target_index >= 0:", "== None) \"\"\" pass def on_epoch_start(self, epoch: int, is_last=False): pass def evaluate(self, net:", "self.each_epochs == 0)) def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor,", "= self._evaluate(net, inputs, logits, targets) # add all values to current stat dict", "string prefix added in front of each dict key :param epoch: optional int", "dict key :param epoch: optional int :param stats: {str: tensor} or {str: [tensor]}", ":param inputs: network inputs :param logits: network outputs :param targets: output targets :return:", "new_logits = [lg.clone().detach_() for lg in logits] for lg in new_logits: min_ =", "the output stream and loggers (e.g. tensorboard), all single results of _evaluate() are", "+ 1) % self.each_epochs == 0)) def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits:", "import ResultValue from uninas.utils.args import ArgsInterface, Namespace, Argument class AbstractMetric(ArgsInterface): \"\"\" Metrics during", "get_log_name(self) -> str: raise NotImplementedError def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor],", "-1).transpose(0, 1)) else: new_logits.append(tensor) return new_logits[:-1], new_logits[-1] @classmethod def _remove_onehot(cls, targets: torch.Tensor) ->", "targets: output targets :param key: prefix for the dict keys, e.g. 
\"train\" or", "epochs, only last if <=0'), ] def reset(self, key: str = None): \"\"\"", "logits from predicting an ignored class :param logits: network outputs, each has the", "targets[to_use] # prevent logits from predicting an ignored class if ignore_prediction_index >= 0:", ":param key: prefix for the dict keys, e.g. \"train\" or \"test\" :return: dictionary", "= min_ logits = new_logits return logits, targets def get_accumulated_stats(self, key: str) ->", "key \"\"\" return self.stats.get(key, {}) def eval_accumulated_stats(self, save_dir: str, key: str, prefix=\"\", epoch:", "_ignore_with_index(cls, logits: [torch.Tensor], targets: torch.Tensor, ignore_target_index=-999, ignore_prediction_index=-999) ->\\ ([torch.Tensor], torch.Tensor): \"\"\" remove all", "([torch.Tensor], torch.Tensor): \"\"\" remove all occurrences where the target equals the ignore index,", "where to save them :param key: key to log :param prefix: string prefix", "((self.each_epochs > 0) and ((epoch + 1) % self.each_epochs == 0)) def evaluate(self,", "ignore index if ignore_target_index >= 0: to_use = targets != ignore_target_index logits =", "self.stats[k].clear() def on_epoch_start(self, epoch: int, is_last=False): self.reset(key=None) self.is_active = is_last or ((self.each_epochs >", "{str: ResultValue}: \"\"\" :param net: evaluated network :param inputs: network inputs :param logits:", "data set that is evaluated on :param head_weights: how each head is weighted", "matches this index :param ignore_prediction_index: if the network predicts this index, choose the", "_remove_onehot(cls, targets: torch.Tensor) -> torch.Tensor: \"\"\" remove one-hot encoding from a [batch, classes]", "[batch] shape :param ignore_target_index: remove all samples where the target matches this index", "batches \"\"\" return sum(tensors) @classmethod def args_to_add(cls, index=None) -> [Argument]: \"\"\" list arguments", "save_dir: if stats are visualized, where to save them :param key: key to", "Namespace, 
Argument class AbstractMetric(ArgsInterface): \"\"\" Metrics during (supervised) network training, between network outputs", "inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str) -> {str: torch.Tensor}: \"\"\" :param", "of each dict key :param epoch: optional int :param stats: {str: tensor} or", "NotImplementedError class AbstractLogMetric(AbstractMetric): \"\"\" A metric that is logged epoch-wise to the output", "ignore_target_index >= 0: to_use = targets != ignore_target_index logits = [lg[to_use] for lg", "@classmethod def _to_dict(cls, key: str, prefix: str, name: str, dct: dict) -> dict:", "net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str) -> {str: ResultValue}:", "is None: stats = self.get_accumulated_stats(key) else: with torch.no_grad(): stats = {k: self._combine_tensors(k, v)", "key: str) -> {str: torch.Tensor}: \"\"\" get the averaged statistics for a specific", "evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str) -> {str:", "uninas.training.result import ResultValue from uninas.utils.args import ArgsInterface, Namespace, Argument class AbstractMetric(ArgsInterface): \"\"\" Metrics", "inputs: network inputs :param logits: network outputs :param targets: output targets :param key:", "that accumulates stats first \"\"\" def __init__(self, head_weights: list, each_epochs=-1, **kwargs): super().__init__(head_weights, **kwargs)", "{} def _viz_stats(self, save_dir: str, key: str, prefix: str, stats: dict): \"\"\" visualize", "_viz_stats(self, save_dir: str, key: str, prefix: str, stats: dict): \"\"\" visualize this metric", "remove one-hot encoding from a [batch, classes] tensor \"\"\" if len(targets.shape) == 2:", "self._combine_tensors(k, [self.stats[key][k], v.value]) else: self.stats[key][k] = v.value return {} def _evaluate(self, net: AbstractNetwork,", "accumulates stats first \"\"\" def __init__(self, head_weights: 
list, each_epochs=-1, **kwargs): super().__init__(head_weights, **kwargs) self.stats", "\"test\" :return: dictionary of string keys with corresponding results \"\"\" with torch.no_grad(): cur", "0: new_logits = [lg.clone().detach_() for lg in logits] for lg in new_logits: min_", "results \"\"\" raise NotImplementedError class AbstractAccumulateMetric(AbstractMetric): \"\"\" A metric that accumulates stats first", "to add to argparse when this class (or a child class) is chosen", "\"\"\" raise NotImplementedError class AbstractAccumulateMetric(AbstractMetric): \"\"\" A metric that accumulates stats first \"\"\"", "_evaluate() are weighted averaged later, by how the batch sizes of each single", "for lg in logits] targets = targets[to_use] # prevent logits from predicting an", "!= ignore_target_index logits = [lg[to_use] for lg in logits] targets = targets[to_use] #", "torch.no_grad(): stats = {k: self._combine_tensors(k, v) if isinstance(v, list) else v for k,", "tensor \"\"\" if len(targets.shape) == 2: return torch.argmax(targets, dim=-1) return targets @classmethod def", "super().args_to_add(index) + [ Argument('each_epochs', default=-1, type=int, help='visualize each n epochs, only last if", "v in cur.items()} return self._to_dict(key, \"\", self.get_log_name(), cur) def _evaluate(self, net: AbstractNetwork, inputs:", "\"\"\" reshape all [batch, classes, n0, n1, ...] 
tensors into [batch, classes] :param", "str) else list(self.stats.keys()) for k in keys: self.stats[k].clear() def on_epoch_start(self, epoch: int, is_last=False):", "keys with corresponding results \"\"\" raise NotImplementedError def get_accumulated_stats(self, key: str) -> {str:", "key, prefix, stats) return self._to_dict(key, prefix, self.get_log_name(), self._compute_stats(save_dir, key, stats)) return {} def", "key and name to all dict entries \"\"\" s = \"%s/%s\" % (key,", "str, key: str, stats: dict) -> dict: \"\"\" compute this metric \"\"\" return", "= None, stats: dict = None) -> dict: \"\"\" visualize/log this metric :param", "import AbstractNetwork from uninas.training.result import ResultValue from uninas.utils.args import ArgsInterface, Namespace, Argument class", "str = None): \"\"\" reset tracked stats for a specific key, or all", "results \"\"\" raise NotImplementedError def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}: \"\"\" get", "type=int, help='visualize each n epochs, only last if <=0'), ] def reset(self, key:", "logits: [torch.Tensor], targets: torch.Tensor, ignore_target_index=-999, ignore_prediction_index=-999) ->\\ ([torch.Tensor], torch.Tensor): \"\"\" remove all occurrences", ":param logits: network outputs :param targets: output targets :param key: prefix for the", "occurrences where the target equals the ignore index, prevent logits from predicting an", "sizes of each single result \"\"\" def get_log_name(self) -> str: raise NotImplementedError def", "metric \"\"\" return {} def _viz_stats(self, save_dir: str, key: str, prefix: str, stats:", "equals the ignore index if ignore_target_index >= 0: to_use = targets != ignore_target_index", "new_logits: min_ = lg.min(axis=1).values lg[:, ignore_prediction_index] = min_ logits = new_logits return logits,", "else list(self.stats.keys()) for k in keys: self.stats[k].clear() def on_epoch_start(self, epoch: int, is_last=False): self.reset(key=None)", "tensor} or {str: [tensor]} 
:return: usually empty dict if stats are visualized, otherwise", "in logits] for lg in new_logits: min_ = lg.min(axis=1).values lg[:, ignore_prediction_index] = min_", "**kwargs): super().__init__(head_weights, **kwargs) self.stats = defaultdict(dict) self.each_epochs = each_epochs self.is_active = False def", "with corresponding results \"\"\" raise NotImplementedError def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}:", "are visualized, where to save them :param key: key to log :param prefix:", "the [batch] shape :param ignore_target_index: remove all samples where the target matches this", "otherwise the result of accumulating the stats \"\"\" return {} def reset(self, key:", "stats = {k: self._combine_tensors(k, v) if isinstance(v, list) else v for k, v", "list) else v for k, v in stats.items()} if len(stats) > 0: if", "class AbstractMetric(ArgsInterface): \"\"\" Metrics during (supervised) network training, between network outputs and some", "[lg.clone().detach_() for lg in logits] for lg in new_logits: min_ = lg.min(axis=1).values lg[:,", "key, or all (if key == None) \"\"\" pass def on_epoch_start(self, epoch: int,", "v in cur.items(): if k in self.stats[key]: self.stats[key][k] = self._combine_tensors(k, [self.stats[key][k], v.value]) else:", "save_dir: str, key: str, prefix=\"\", epoch: int = None, stats: dict = None)", "the averaged statistics for a specific key \"\"\" return self.stats.get(key, {}) def eval_accumulated_stats(self,", ":return: usually empty dict if stats are visualized, otherwise the result of accumulating", "str: raise NotImplementedError def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor,", "tensors \"\"\" if not self.is_active: return {} with torch.no_grad(): cur = self._evaluate(net, inputs,", "{} def reset(self, key: str = None): \"\"\" reset tracked stats for a", "each single result \"\"\" def get_log_name(self) -> str: raise NotImplementedError def 
evaluate(self, net:", "or from different batches \"\"\" return sum(tensors) @classmethod def args_to_add(cls, index=None) -> [Argument]:", "\"\"\" keys = [key] if isinstance(key, str) else list(self.stats.keys()) for k in keys:", "stats = self.get_accumulated_stats(key) else: with torch.no_grad(): stats = {k: self._combine_tensors(k, v) if isinstance(v,", "the ignore index if ignore_target_index >= 0: to_use = targets != ignore_target_index logits", "-> dict: \"\"\" compute this metric \"\"\" return {} def _viz_stats(self, save_dir: str,", "has the [batch, classes] shape :param targets: output targets, has the [batch] shape", "target matches this index :param ignore_prediction_index: if the network predicts this index, choose", "= {k: self._combine_tensors(k, v) if isinstance(v, list) else v for k, v in", ":param data_set: data set that is evaluated on :param head_weights: how each head", "e.g. \"train\" or \"test\" :return: dictionary of string keys with corresponding results \"\"\"", "torch.no_grad(): cur = self._evaluate(net, inputs, logits, targets) cur = {k: v.unsqueeze() for k,", "= self.get_accumulated_stats(key) else: with torch.no_grad(): stats = {k: self._combine_tensors(k, v) if isinstance(v, list)", "\"\"\" visualize/log this metric :param save_dir: if stats are visualized, where to save", "all_parsed = cls._all_parsed_arguments(args, index=index) return cls(head_weights=head_weights, **all_parsed) @classmethod def _to_dict(cls, key: str, prefix:", "lg in logits] targets = targets[to_use] # prevent logits from predicting an ignored", "args: Namespace, index: int, data_set: AbstractDataSet, head_weights: list) -> 'AbstractMetric': \"\"\" :param args:", "shape :param targets: output targets, has the [batch] shape :param ignore_target_index: remove all", ":param logits: network outputs, each has the [batch, classes] shape :param targets: output", "from_args(cls, args: Namespace, index: int, data_set: AbstractDataSet, head_weights: list) -> 
'AbstractMetric': \"\"\" :param", "namespace :param index: index of this metric :param data_set: data set that is", "return new_logits[:-1], new_logits[-1] @classmethod def _remove_onehot(cls, targets: torch.Tensor) -> torch.Tensor: \"\"\" remove one-hot", "or ((self.each_epochs > 0) and ((epoch + 1) % self.each_epochs == 0)) def", "stats are visualized, otherwise the result of accumulating the stats \"\"\" if stats", "dict) -> dict: \"\"\" adds key and name to all dict entries \"\"\"", ":param stats: {str: tensor} or {str: [tensor]} :return: usually empty dict if stats", "v.value]) else: self.stats[key][k] = v.value return {} def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor,", "batch sizes of each single result \"\"\" def get_log_name(self) -> str: raise NotImplementedError", "an ignored class if ignore_prediction_index >= 0: new_logits = [lg.clone().detach_() for lg in", "later, by how the batch sizes of each single result \"\"\" def get_log_name(self)", "and ((epoch + 1) % self.each_epochs == 0)) def evaluate(self, net: AbstractNetwork, inputs:", "dictionary of string keys with corresponding results \"\"\" raise NotImplementedError def get_accumulated_stats(self, key:", "specific key, or all (if key == None) \"\"\" keys = [key] if", ":param logits: network outputs :param targets: output targets \"\"\" new_logits = [] for", "-> dict: \"\"\" adds key and name to all dict entries \"\"\" s", "...] tensors into [batch, classes] :param logits: network outputs :param targets: output targets", "next most-likely prediction instead \"\"\" # remove all occurrences where the target equals", "accumulating the stats \"\"\" if stats is None: stats = self.get_accumulated_stats(key) else: with", "isinstance(key, str) else list(self.stats.keys()) for k in keys: self.stats[k].clear() def on_epoch_start(self, epoch: int,", "the result of accumulating the stats \"\"\" if stats is None: stats =", "all [batch, classes, n0, n1, ...] 
tensors into [batch, classes] :param logits: network", "classes] tensor \"\"\" if len(targets.shape) == 2: return torch.argmax(targets, dim=-1) return targets @classmethod", "== 0)) def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key:", "of string keys with corresponding results \"\"\" raise NotImplementedError def get_accumulated_stats(self, key: str)", "evaluated network :param inputs: network inputs :param logits: network outputs :param targets: output", "reset tracked stats for a specific key, or all (if key == None)", "[batch, classes] shape :param targets: output targets, has the [batch] shape :param ignore_target_index:", "\"\"\" raise NotImplementedError def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor)", "added in front of each dict key :param epoch: optional int :param stats:", "keys with corresponding results \"\"\" with torch.no_grad(): cur = self._evaluate(net, inputs, logits, targets)", "class (or a child class) is chosen \"\"\" return super().args_to_add(index) + [ Argument('each_epochs',", "compute this metric \"\"\" return {} def _viz_stats(self, save_dir: str, key: str, prefix:", "reshape all [batch, classes, n0, n1, ...] tensors into [batch, classes] :param logits:", "@classmethod def _batchify_tensors(cls, logits: [torch.Tensor], targets: torch.Tensor) -> ([torch.Tensor], torch.Tensor): \"\"\" reshape all", "= targets != ignore_target_index logits = [lg[to_use] for lg in logits] targets =", "<=0'), ] def reset(self, key: str = None): \"\"\" reset tracked stats for", "None) \"\"\" keys = [key] if isinstance(key, str) else list(self.stats.keys()) for k in", "classes, n0, n1, ...] 
tensors into [batch, classes] :param logits: network outputs :param", "results of _evaluate() are weighted averaged later, by how the batch sizes of", "in cur.items()} return self._to_dict(key, \"\", self.get_log_name(), cur) def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor,", "logits + [targets]: shape = tensor.shape if len(shape) > 2: new_logits.append(tensor.transpose(0, 1).reshape(shape[1], -1).transpose(0,", "argparse when this class (or a child class) is chosen \"\"\" return super().args_to_add(index)", "v in stats.items()} if len(stats) > 0: if isinstance(epoch, int): save_dir = '%s/epoch_%d/'", "shape :param ignore_target_index: remove all samples where the target matches this index :param", "this index, choose the next most-likely prediction instead \"\"\" # remove all occurrences", "index of this metric :param data_set: data set that is evaluated on :param", "Namespace, index: int, data_set: AbstractDataSet, head_weights: list) -> 'AbstractMetric': \"\"\" :param args: global", "def args_to_add(cls, index=None) -> [Argument]: \"\"\" list arguments to add to argparse when", "{str: tensor} or {str: [tensor]} :return: usually empty dict if stats are visualized,", "new_logits return logits, targets def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}: \"\"\" get", "import defaultdict from uninas.data.abstract import AbstractDataSet from uninas.models.networks.abstract import AbstractNetwork from uninas.training.result import", "outputs, each has the [batch, classes] shape :param targets: output targets, has the", "tensors if they are gathered from distributed training or from different batches \"\"\"", "return {} with torch.no_grad(): cur = self._evaluate(net, inputs, logits, targets) # add all", "\"\"\" remove all occurrences where the target equals the ignore index, prevent logits", "\"\"\" raise NotImplementedError class AbstractLogMetric(AbstractMetric): \"\"\" A metric that is logged epoch-wise to", "torch.Tensor) -> {str: 
ResultValue}: \"\"\" :param net: evaluated network :param inputs: network inputs", "for k, v in cur.items(): if k in self.stats[key]: self.stats[key][k] = self._combine_tensors(k, [self.stats[key][k],", "or all (if key == None) \"\"\" keys = [key] if isinstance(key, str)", "to the output stream and loggers (e.g. tensorboard), all single results of _evaluate()", "network inputs :param logits: network outputs :param targets: output targets :return: dictionary of", "= is_last or ((self.each_epochs > 0) and ((epoch + 1) % self.each_epochs ==", "get_accumulated_stats(self, key: str) -> {str: torch.Tensor}: \"\"\" get the averaged statistics for a", "all occurrences where the target equals the ignore index, prevent logits from predicting", "\"\"\" A metric that is logged epoch-wise to the output stream and loggers", "s = \"%s/%s\" % (key, name) if len(prefix) == 0 else \"%s/%s/%s\" %", "raise NotImplementedError @classmethod def from_args(cls, args: Namespace, index: int, data_set: AbstractDataSet, head_weights: list)", "int, data_set: AbstractDataSet, head_weights: list) -> 'AbstractMetric': \"\"\" :param args: global arguments namespace", "-> {str: torch.Tensor}: \"\"\" :param net: evaluated network :param inputs: network inputs :param", "targets) cur = {k: v.unsqueeze() for k, v in cur.items()} return self._to_dict(key, \"\",", "corresponding results \"\"\" raise NotImplementedError class AbstractAccumulateMetric(AbstractMetric): \"\"\" A metric that accumulates stats", "Argument('each_epochs', default=-1, type=int, help='visualize each n epochs, only last if <=0'), ] def", "= {k: v.unsqueeze() for k, v in cur.items()} return self._to_dict(key, \"\", self.get_log_name(), cur)", "\"\"\" get the averaged statistics for a specific key \"\"\" return self.stats.get(key, {})", "None): \"\"\" reset tracked stats for a specific key, or all (if key", "\"\"\" pass def on_epoch_start(self, epoch: int, is_last=False): pass def evaluate(self, net: AbstractNetwork, inputs:", 
"ArgsInterface, Namespace, Argument class AbstractMetric(ArgsInterface): \"\"\" Metrics during (supervised) network training, between network", "v for k, v in dct.items()} @classmethod def _batchify_tensors(cls, logits: [torch.Tensor], targets: torch.Tensor)", ":param args: global arguments namespace :param index: index of this metric :param data_set:", "\"\"\" A metric that accumulates stats first \"\"\" def __init__(self, head_weights: list, each_epochs=-1,", "= cls._all_parsed_arguments(args, index=index) return cls(head_weights=head_weights, **all_parsed) @classmethod def _to_dict(cls, key: str, prefix: str,", "of _evaluate() are weighted averaged later, by how the batch sizes of each", "dct: dict) -> dict: \"\"\" adds key and name to all dict entries", ":param ignore_prediction_index: if the network predicts this index, choose the next most-likely prediction", "def eval_accumulated_stats(self, save_dir: str, key: str, prefix=\"\", epoch: int = None, stats: dict", "raise NotImplementedError def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}: \"\"\" get the averaged", "get_log_name(self) -> str: raise NotImplementedError @classmethod def _combine_tensors(cls, dict_key: str, tensors: [torch.Tensor]) ->", "of string keys with corresponding [scalar] tensors \"\"\" if not self.is_active: return {}", "class) is chosen \"\"\" return super().args_to_add(index) + [ Argument('each_epochs', default=-1, type=int, help='visualize each", "def on_epoch_start(self, epoch: int, is_last=False): pass def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits:", "logits from predicting an ignored class if ignore_prediction_index >= 0: new_logits = [lg.clone().detach_()", "\"\"\" return sum(tensors) @classmethod def args_to_add(cls, index=None) -> [Argument]: \"\"\" list arguments to", "raise NotImplementedError def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor) ->", "= [lg.clone().detach_() for lg 
in logits] for lg in new_logits: min_ = lg.min(axis=1).values", "save_dir: str, key: str, prefix: str, stats: dict): \"\"\" visualize this metric \"\"\"", "from collections import defaultdict from uninas.data.abstract import AbstractDataSet from uninas.models.networks.abstract import AbstractNetwork from", "(s, k): v for k, v in dct.items()} @classmethod def _batchify_tensors(cls, logits: [torch.Tensor],", ":return: dictionary of string keys with corresponding results \"\"\" raise NotImplementedError class AbstractAccumulateMetric(AbstractMetric):", "logits = new_logits return logits, targets def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}:", "the ignore index, prevent logits from predicting an ignored class :param logits: network", ":param key: key to log :param prefix: string prefix added in front of", "for a specific key \"\"\" return {} def eval_accumulated_stats(self, save_dir: str, key: str,", "self.each_epochs = each_epochs self.is_active = False def get_log_name(self) -> str: raise NotImplementedError @classmethod", "= head_weights for k, v in kwargs.items(): self.__setattr__(k, v) def get_log_name(self) -> str:", "0: if isinstance(epoch, int): save_dir = '%s/epoch_%d/' % (save_dir, epoch) self._viz_stats(save_dir, key, prefix,", "has the [batch] shape :param ignore_target_index: remove all samples where the target matches", "output targets \"\"\" new_logits = [] for tensor in logits + [targets]: shape", "0: to_use = targets != ignore_target_index logits = [lg[to_use] for lg in logits]", "targets :return: dictionary of string keys with corresponding results \"\"\" raise NotImplementedError class", "cur = {k: v.unsqueeze() for k, v in cur.items()} return self._to_dict(key, \"\", self.get_log_name(),", "key == None) \"\"\" keys = [key] if isinstance(key, str) else list(self.stats.keys()) for", ":param ignore_target_index: remove all samples where the target matches this index :param ignore_prediction_index:", "stats \"\"\" if stats is None: 
stats = self.get_accumulated_stats(key) else: with torch.no_grad(): stats", "len(stats) > 0: if isinstance(epoch, int): save_dir = '%s/epoch_%d/' % (save_dir, epoch) self._viz_stats(save_dir,", "**all_parsed) @classmethod def _to_dict(cls, key: str, prefix: str, name: str, dct: dict) ->", "1)) else: new_logits.append(tensor) return new_logits[:-1], new_logits[-1] @classmethod def _remove_onehot(cls, targets: torch.Tensor) -> torch.Tensor:", "combine tensors if they are gathered from distributed training or from different batches", "torch from collections import defaultdict from uninas.data.abstract import AbstractDataSet from uninas.models.networks.abstract import AbstractNetwork", "def get_accumulated_stats(self, key: str) -> {str: torch.Tensor}: \"\"\" get the averaged statistics for", "are gathered from distributed training or from different batches \"\"\" return sum(tensors) @classmethod", "torch.Tensor}: \"\"\" :param net: evaluated network :param inputs: network inputs :param logits: network", "class if ignore_prediction_index >= 0: new_logits = [lg.clone().detach_() for lg in logits] for", "network inputs :param logits: network outputs :param targets: output targets :param key: prefix", "or \"test\" :return: dictionary of string keys with corresponding [scalar] tensors \"\"\" if", "and name to all dict entries \"\"\" s = \"%s/%s\" % (key, name)", "AbstractDataSet, head_weights: list) -> 'AbstractMetric': \"\"\" :param args: global arguments namespace :param index:", "in front of each dict key :param epoch: optional int :param stats: {str:", "logits: [torch.Tensor], targets: torch.Tensor, key: str) -> {str: ResultValue}: \"\"\" :param net: evaluated", "output targets :param key: prefix for the dict keys, e.g. 
\"train\" or \"test\"", "self.stats[key]: self.stats[key][k] = self._combine_tensors(k, [self.stats[key][k], v.value]) else: self.stats[key][k] = v.value return {} def", "+ [targets]: shape = tensor.shape if len(shape) > 2: new_logits.append(tensor.transpose(0, 1).reshape(shape[1], -1).transpose(0, 1))", "self.is_active = is_last or ((self.each_epochs > 0) and ((epoch + 1) % self.each_epochs", "for lg in new_logits: min_ = lg.min(axis=1).values lg[:, ignore_prediction_index] = min_ logits =", "stats for a specific key, or all (if key == None) \"\"\" pass", "-> {str: torch.Tensor}: \"\"\" get the averaged statistics for a specific key \"\"\"", "len(targets.shape) == 2: return torch.argmax(targets, dim=-1) return targets @classmethod def _ignore_with_index(cls, logits: [torch.Tensor],", "\"\"\" def get_log_name(self) -> str: raise NotImplementedError def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor,", "each head is weighted \"\"\" all_parsed = cls._all_parsed_arguments(args, index=index) return cls(head_weights=head_weights, **all_parsed) @classmethod", ":return: dictionary of string keys with corresponding [scalar] tensors \"\"\" if not self.is_active:", "uninas.models.networks.abstract import AbstractNetwork from uninas.training.result import ResultValue from uninas.utils.args import ArgsInterface, Namespace, Argument", "\"\"\" if len(targets.shape) == 2: return torch.argmax(targets, dim=-1) return targets @classmethod def _ignore_with_index(cls,", "prefix, stats) return self._to_dict(key, prefix, self.get_log_name(), self._compute_stats(save_dir, key, stats)) return {} def _compute_stats(self,", "from uninas.training.result import ResultValue from uninas.utils.args import ArgsInterface, Namespace, Argument class AbstractMetric(ArgsInterface): \"\"\"", "torch.Tensor: \"\"\" remove one-hot encoding from a [batch, classes] tensor \"\"\" if len(targets.shape)", "a specific key \"\"\" return self.stats.get(key, {}) def eval_accumulated_stats(self, 
save_dir: str, key: str,", "to argparse when this class (or a child class) is chosen \"\"\" return", "% (s, k): v for k, v in dct.items()} @classmethod def _batchify_tensors(cls, logits:", "def reset(self, key: str = None): \"\"\" reset tracked stats for a specific", "targets: torch.Tensor) -> {str: ResultValue}: \"\"\" :param net: evaluated network :param inputs: network", "string keys with corresponding [scalar] tensors \"\"\" if not self.is_active: return {} with", "corresponding results \"\"\" with torch.no_grad(): cur = self._evaluate(net, inputs, logits, targets) cur =", "torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str) -> {str: torch.Tensor}: \"\"\" :param net:", "@classmethod def _ignore_with_index(cls, logits: [torch.Tensor], targets: torch.Tensor, ignore_target_index=-999, ignore_prediction_index=-999) ->\\ ([torch.Tensor], torch.Tensor): \"\"\"", "tensor in logits + [targets]: shape = tensor.shape if len(shape) > 2: new_logits.append(tensor.transpose(0,", "args_to_add(cls, index=None) -> [Argument]: \"\"\" list arguments to add to argparse when this", "{} with torch.no_grad(): cur = self._evaluate(net, inputs, logits, targets) # add all values", "{} def _compute_stats(self, save_dir: str, key: str, stats: dict) -> dict: \"\"\" compute", "int, is_last=False): pass def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor,", "classes] shape :param targets: output targets, has the [batch] shape :param ignore_target_index: remove", "kwargs.items(): self.__setattr__(k, v) def get_log_name(self) -> str: raise NotImplementedError @classmethod def from_args(cls, args:", "network :param inputs: network inputs :param logits: network outputs :param targets: output targets", "epoch-wise to the output stream and loggers (e.g. 
tensorboard), all single results of", "uninas.data.abstract import AbstractDataSet from uninas.models.networks.abstract import AbstractNetwork from uninas.training.result import ResultValue from uninas.utils.args", "head_weights: list, each_epochs=-1, **kwargs): super().__init__(head_weights, **kwargs) self.stats = defaultdict(dict) self.each_epochs = each_epochs self.is_active", "torch.Tensor: \"\"\" how to combine tensors if they are gathered from distributed training", "self._compute_stats(save_dir, key, stats)) return {} def _compute_stats(self, save_dir: str, key: str, stats: dict)", "list) -> 'AbstractMetric': \"\"\" :param args: global arguments namespace :param index: index of", "stream and loggers (e.g. tensorboard), all single results of _evaluate() are weighted averaged", "str, tensors: [torch.Tensor]) -> torch.Tensor: \"\"\" how to combine tensors if they are", ":param net: evaluated network :param inputs: network inputs :param logits: network outputs :param", "return self._to_dict(key, \"\", self.get_log_name(), cur) def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor],", "dictionary of string keys with corresponding results \"\"\" raise NotImplementedError class AbstractAccumulateMetric(AbstractMetric): \"\"\"", "isinstance(v, list) else v for k, v in stats.items()} if len(stats) > 0:", "head_weights: how each head is weighted \"\"\" all_parsed = cls._all_parsed_arguments(args, index=index) return cls(head_weights=head_weights,", "head_weights: list, **kwargs): super().__init__() self.head_weights = head_weights for k, v in kwargs.items(): self.__setattr__(k,", "when this class (or a child class) is chosen \"\"\" return super().args_to_add(index) +", "a child class) is chosen \"\"\" return super().args_to_add(index) + [ Argument('each_epochs', default=-1, type=int,", "single results of _evaluate() are weighted averaged later, by how the batch sizes", "some targets \"\"\" def __init__(self, head_weights: list, 
**kwargs): super().__init__() self.head_weights = head_weights for", "dictionary of string keys with corresponding results \"\"\" with torch.no_grad(): cur = self._evaluate(net,", "tensorboard), all single results of _evaluate() are weighted averaged later, by how the", "outputs :param targets: output targets \"\"\" new_logits = [] for tensor in logits", "self._evaluate(net, inputs, logits, targets) cur = {k: v.unsqueeze() for k, v in cur.items()}", "defaultdict(dict) self.each_epochs = each_epochs self.is_active = False def get_log_name(self) -> str: raise NotImplementedError", "on :param head_weights: how each head is weighted \"\"\" all_parsed = cls._all_parsed_arguments(args, index=index)", "[torch.Tensor], targets: torch.Tensor, key: str) -> {str: torch.Tensor}: \"\"\" :param net: evaluated network", "stats)) return {} def _compute_stats(self, save_dir: str, key: str, stats: dict) -> dict:", "0 else \"%s/%s/%s\" % (prefix, key, name) return {'%s/%s' % (s, k): v", "network outputs :param targets: output targets :return: dictionary of string keys with corresponding", "stats) return self._to_dict(key, prefix, self.get_log_name(), self._compute_stats(save_dir, key, stats)) return {} def _compute_stats(self, save_dir:", "dict entries \"\"\" s = \"%s/%s\" % (key, name) if len(prefix) == 0", "in self.stats[key]: self.stats[key][k] = self._combine_tensors(k, [self.stats[key][k], v.value]) else: self.stats[key][k] = v.value return {}", "where the target equals the ignore index if ignore_target_index >= 0: to_use =", "new_logits[:-1], new_logits[-1] @classmethod def _remove_onehot(cls, targets: torch.Tensor) -> torch.Tensor: \"\"\" remove one-hot encoding", "targets: output targets \"\"\" new_logits = [] for tensor in logits + [targets]:", "raise NotImplementedError def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key:", "stats is None: stats = self.get_accumulated_stats(key) else: with torch.no_grad(): stats 
= {k: self._combine_tensors(k,", "str, key: str, prefix: str, stats: dict): \"\"\" visualize this metric \"\"\" pass", "predicting an ignored class if ignore_prediction_index >= 0: new_logits = [lg.clone().detach_() for lg", "\"\"\" return {} def _viz_stats(self, save_dir: str, key: str, prefix: str, stats: dict):", "str, prefix=\"\", epoch: int = None, stats: dict = None) -> dict: \"\"\"", "\"train\" or \"test\" :return: dictionary of string keys with corresponding results \"\"\" with", "index if ignore_target_index >= 0: to_use = targets != ignore_target_index logits = [lg[to_use]", "between network outputs and some targets \"\"\" def __init__(self, head_weights: list, **kwargs): super().__init__()", "v.value return {} def _evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor)", "that is evaluated on :param head_weights: how each head is weighted \"\"\" all_parsed", "inputs, logits, targets) # add all values to current stat dict for k,", "str, name: str, dct: dict) -> dict: \"\"\" adds key and name to", "0)) def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str)", "visualized, otherwise the result of accumulating the stats \"\"\" return {} def reset(self,", "torch.Tensor): \"\"\" reshape all [batch, classes, n0, n1, ...] 
tensors into [batch, classes]", "if len(shape) > 2: new_logits.append(tensor.transpose(0, 1).reshape(shape[1], -1).transpose(0, 1)) else: new_logits.append(tensor) return new_logits[:-1], new_logits[-1]", "return {} def eval_accumulated_stats(self, save_dir: str, key: str, prefix=\"\", epoch: int = None,", "metric :param data_set: data set that is evaluated on :param head_weights: how each", "return self.stats.get(key, {}) def eval_accumulated_stats(self, save_dir: str, key: str, prefix=\"\", epoch: int =", "prefix, self.get_log_name(), self._compute_stats(save_dir, key, stats)) return {} def _compute_stats(self, save_dir: str, key: str,", "if isinstance(epoch, int): save_dir = '%s/epoch_%d/' % (save_dir, epoch) self._viz_stats(save_dir, key, prefix, stats)", ":param index: index of this metric :param data_set: data set that is evaluated", "\"\"\" compute this metric \"\"\" return {} def _viz_stats(self, save_dir: str, key: str,", "self.is_active: return {} with torch.no_grad(): cur = self._evaluate(net, inputs, logits, targets) # add", "each_epochs=-1, **kwargs): super().__init__(head_weights, **kwargs) self.stats = defaultdict(dict) self.each_epochs = each_epochs self.is_active = False", "[torch.Tensor], targets: torch.Tensor, key: str) -> {str: ResultValue}: \"\"\" :param net: evaluated network", "AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str) -> {str: ResultValue}: \"\"\"", "by how the batch sizes of each single result \"\"\" def get_log_name(self) ->", "if len(targets.shape) == 2: return torch.argmax(targets, dim=-1) return targets @classmethod def _ignore_with_index(cls, logits:", "len(prefix) == 0 else \"%s/%s/%s\" % (prefix, key, name) return {'%s/%s' % (s,", "k in self.stats[key]: self.stats[key][k] = self._combine_tensors(k, [self.stats[key][k], v.value]) else: self.stats[key][k] = v.value return", "min_ = lg.min(axis=1).values lg[:, ignore_prediction_index] = min_ logits = new_logits return logits, 
targets", "\"\"\" Metrics during (supervised) network training, between network outputs and some targets \"\"\"", "[lg[to_use] for lg in logits] targets = targets[to_use] # prevent logits from predicting", "new_logits.append(tensor.transpose(0, 1).reshape(shape[1], -1).transpose(0, 1)) else: new_logits.append(tensor) return new_logits[:-1], new_logits[-1] @classmethod def _remove_onehot(cls, targets:", "data_set: AbstractDataSet, head_weights: list) -> 'AbstractMetric': \"\"\" :param args: global arguments namespace :param", "AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str) -> {str: torch.Tensor}: \"\"\"", "of string keys with corresponding results \"\"\" raise NotImplementedError class AbstractLogMetric(AbstractMetric): \"\"\" A", "targets @classmethod def _ignore_with_index(cls, logits: [torch.Tensor], targets: torch.Tensor, ignore_target_index=-999, ignore_prediction_index=-999) ->\\ ([torch.Tensor], torch.Tensor):", "{str: torch.Tensor}: \"\"\" :param net: evaluated network :param inputs: network inputs :param logits:", ":param epoch: optional int :param stats: {str: tensor} or {str: [tensor]} :return: usually", "torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor) -> {str: ResultValue}: \"\"\" :param net: evaluated network", "inputs :param logits: network outputs :param targets: output targets :param key: prefix for", "NotImplementedError def evaluate(self, net: AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor, key: str)", "is weighted \"\"\" all_parsed = cls._all_parsed_arguments(args, index=index) return cls(head_weights=head_weights, **all_parsed) @classmethod def _to_dict(cls,", "child class) is chosen \"\"\" return super().args_to_add(index) + [ Argument('each_epochs', default=-1, type=int, help='visualize", "dict = None) -> dict: \"\"\" visualize/log this metric :param save_dir: if stats", "[tensor]} :return: usually empty dict if stats are visualized, otherwise 
the result of", "NotImplementedError @classmethod def _combine_tensors(cls, dict_key: str, tensors: [torch.Tensor]) -> torch.Tensor: \"\"\" how to", "0) and ((epoch + 1) % self.each_epochs == 0)) def evaluate(self, net: AbstractNetwork,", "> 0) and ((epoch + 1) % self.each_epochs == 0)) def evaluate(self, net:", "string keys with corresponding results \"\"\" raise NotImplementedError class AbstractAccumulateMetric(AbstractMetric): \"\"\" A metric", "values to current stat dict for k, v in cur.items(): if k in", "logits, targets) cur = {k: v.unsqueeze() for k, v in cur.items()} return self._to_dict(key,", "keys with corresponding results \"\"\" raise NotImplementedError class AbstractLogMetric(AbstractMetric): \"\"\" A metric that", "[targets]: shape = tensor.shape if len(shape) > 2: new_logits.append(tensor.transpose(0, 1).reshape(shape[1], -1).transpose(0, 1)) else:", "target equals the ignore index, prevent logits from predicting an ignored class :param", "AbstractAccumulateMetric(AbstractMetric): \"\"\" A metric that accumulates stats first \"\"\" def __init__(self, head_weights: list,", "\"\"\" adds key and name to all dict entries \"\"\" s = \"%s/%s\"", ":param head_weights: how each head is weighted \"\"\" all_parsed = cls._all_parsed_arguments(args, index=index) return", "network predicts this index, choose the next most-likely prediction instead \"\"\" # remove", "keys: self.stats[k].clear() def on_epoch_start(self, epoch: int, is_last=False): self.reset(key=None) self.is_active = is_last or ((self.each_epochs", "min_ logits = new_logits return logits, targets def get_accumulated_stats(self, key: str) -> {str:", "str) -> {str: torch.Tensor}: \"\"\" get the averaged statistics for a specific key", "corresponding results \"\"\" raise NotImplementedError class AbstractLogMetric(AbstractMetric): \"\"\" A metric that is logged", "ignore_prediction_index=-999) ->\\ ([torch.Tensor], torch.Tensor): \"\"\" remove all occurrences where the target equals the", 
"(save_dir, epoch) self._viz_stats(save_dir, key, prefix, stats) return self._to_dict(key, prefix, self.get_log_name(), self._compute_stats(save_dir, key, stats))", "from uninas.data.abstract import AbstractDataSet from uninas.models.networks.abstract import AbstractNetwork from uninas.training.result import ResultValue from", "to save them :param key: key to log :param prefix: string prefix added", "all (if key == None) \"\"\" pass def on_epoch_start(self, epoch: int, is_last=False): pass", "-> {str: ResultValue}: \"\"\" :param net: evaluated network :param inputs: network inputs :param", ":param targets: output targets :param key: prefix for the dict keys, e.g. \"train\"", "from different batches \"\"\" return sum(tensors) @classmethod def args_to_add(cls, index=None) -> [Argument]: \"\"\"", "AbstractNetwork, inputs: torch.Tensor, logits: [torch.Tensor], targets: torch.Tensor) -> {str: ResultValue}: \"\"\" :param net:", "\"%s/%s\" % (key, name) if len(prefix) == 0 else \"%s/%s/%s\" % (prefix, key,", "{k: v.unsqueeze() for k, v in cur.items()} return self._to_dict(key, \"\", self.get_log_name(), cur) def", "if isinstance(v, list) else v for k, v in stats.items()} if len(stats) >", "head_weights: list) -> 'AbstractMetric': \"\"\" :param args: global arguments namespace :param index: index", "-> ([torch.Tensor], torch.Tensor): \"\"\" reshape all [batch, classes, n0, n1, ...] 
tensors into", "@classmethod def from_args(cls, args: Namespace, index: int, data_set: AbstractDataSet, head_weights: list) -> 'AbstractMetric':", "def _ignore_with_index(cls, logits: [torch.Tensor], targets: torch.Tensor, ignore_target_index=-999, ignore_prediction_index=-999) ->\\ ([torch.Tensor], torch.Tensor): \"\"\" remove", "network training, between network outputs and some targets \"\"\" def __init__(self, head_weights: list,", "or all (if key == None) \"\"\" pass def on_epoch_start(self, epoch: int, is_last=False):", "\"\"\" if not self.is_active: return {} with torch.no_grad(): cur = self._evaluate(net, inputs, logits,", "prefix: string prefix added in front of each dict key :param epoch: optional", "% (key, name) if len(prefix) == 0 else \"%s/%s/%s\" % (prefix, key, name)", "adds key and name to all dict entries \"\"\" s = \"%s/%s\" %", "dict: \"\"\" adds key and name to all dict entries \"\"\" s =", "int, is_last=False): self.reset(key=None) self.is_active = is_last or ((self.each_epochs > 0) and ((epoch +", "Metrics during (supervised) network training, between network outputs and some targets \"\"\" def", "inputs :param logits: network outputs :param targets: output targets :return: dictionary of string", "uninas.utils.args import ArgsInterface, Namespace, Argument class AbstractMetric(ArgsInterface): \"\"\" Metrics during (supervised) network training,", "targets: torch.Tensor, key: str) -> {str: torch.Tensor}: \"\"\" :param net: evaluated network :param", "\"\"\" how to combine tensors if they are gathered from distributed training or", "== 0 else \"%s/%s/%s\" % (prefix, key, name) return {'%s/%s' % (s, k):", "NotImplementedError @classmethod def from_args(cls, args: Namespace, index: int, data_set: AbstractDataSet, head_weights: list) ->" ]
[ "def __init__(self, pane_executor, pymux_pane_env): super().__init__(pane_executor) self._pymux_pane_env = pymux_pane_env def _do_exec(self): os.environ['PYMUX_PANE'] = self._pymux_pane_env", "libpymux.panes import ExecPane import os class BashPane(ExecPane): def __init__(self, pane_executor, pymux_pane_env): super().__init__(pane_executor) self._pymux_pane_env", "from libpymux.panes import ExecPane import os class BashPane(ExecPane): def __init__(self, pane_executor, pymux_pane_env): super().__init__(pane_executor)", "ExecPane import os class BashPane(ExecPane): def __init__(self, pane_executor, pymux_pane_env): super().__init__(pane_executor) self._pymux_pane_env = pymux_pane_env", "import ExecPane import os class BashPane(ExecPane): def __init__(self, pane_executor, pymux_pane_env): super().__init__(pane_executor) self._pymux_pane_env =", "BashPane(ExecPane): def __init__(self, pane_executor, pymux_pane_env): super().__init__(pane_executor) self._pymux_pane_env = pymux_pane_env def _do_exec(self): os.environ['PYMUX_PANE'] =", "pane_executor, pymux_pane_env): super().__init__(pane_executor) self._pymux_pane_env = pymux_pane_env def _do_exec(self): os.environ['PYMUX_PANE'] = self._pymux_pane_env os.execv('/bin/bash', ['bash'])", "os class BashPane(ExecPane): def __init__(self, pane_executor, pymux_pane_env): super().__init__(pane_executor) self._pymux_pane_env = pymux_pane_env def _do_exec(self):", "class BashPane(ExecPane): def __init__(self, pane_executor, pymux_pane_env): super().__init__(pane_executor) self._pymux_pane_env = pymux_pane_env def _do_exec(self): os.environ['PYMUX_PANE']", "__init__(self, pane_executor, pymux_pane_env): super().__init__(pane_executor) self._pymux_pane_env = pymux_pane_env def _do_exec(self): os.environ['PYMUX_PANE'] = self._pymux_pane_env os.execv('/bin/bash',", "import os class BashPane(ExecPane): def __init__(self, pane_executor, pymux_pane_env): super().__init__(pane_executor) self._pymux_pane_env = pymux_pane_env def", 
"<reponame>jonathanslenders/old-pymux from libpymux.panes import ExecPane import os class BashPane(ExecPane): def __init__(self, pane_executor, pymux_pane_env):" ]
[ "path_oommf = 'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/oommf.tcl' mif_file = 'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/Skyrmion/skyrmionDome2.mif' length = 2 param_string = ' boxsi", "length threads_string = ' -threads 28 ' oommf_command = 'tclsh ' + path_oommf", "oommf_command = 'tclsh ' + path_oommf + param_string + threads_string + mif_file subprocess.call(oommf_command,", "' % length threads_string = ' -threads 28 ' oommf_command = 'tclsh '", "' -threads 28 ' oommf_command = 'tclsh ' + path_oommf + param_string +", "mif_file = 'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/Skyrmion/skyrmionDome2.mif' length = 2 param_string = ' boxsi '#-parameters \"integer_length %", "28 ' oommf_command = 'tclsh ' + path_oommf + param_string + threads_string +", "threads_string = ' -threads 28 ' oommf_command = 'tclsh ' + path_oommf +", "= ' -threads 28 ' oommf_command = 'tclsh ' + path_oommf + param_string", "' boxsi '#-parameters \"integer_length % s\" ' % length threads_string = ' -threads", "subprocess path_oommf = 'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/oommf.tcl' mif_file = 'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/Skyrmion/skyrmionDome2.mif' length = 2 param_string = '", "= 'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/oommf.tcl' mif_file = 'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/Skyrmion/skyrmionDome2.mif' length = 2 param_string = ' boxsi '#-parameters", "'#-parameters \"integer_length % s\" ' % length threads_string = ' -threads 28 '", "= 'tclsh ' + path_oommf + param_string + threads_string + mif_file subprocess.call(oommf_command, shell=True)", "= ' boxsi '#-parameters \"integer_length % s\" ' % length threads_string = '", "boxsi '#-parameters \"integer_length % s\" ' % length threads_string = ' -threads 28", "= 2 param_string = ' boxsi '#-parameters \"integer_length % s\" ' % length", "2 param_string = ' boxsi '#-parameters \"integer_length % s\" ' % length 
threads_string", "'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/oommf.tcl' mif_file = 'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/Skyrmion/skyrmionDome2.mif' length = 2 param_string = ' boxsi '#-parameters \"integer_length", "import subprocess path_oommf = 'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/oommf.tcl' mif_file = 'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/Skyrmion/skyrmionDome2.mif' length = 2 param_string =", "\"integer_length % s\" ' % length threads_string = ' -threads 28 ' oommf_command", "-threads 28 ' oommf_command = 'tclsh ' + path_oommf + param_string + threads_string", "s\" ' % length threads_string = ' -threads 28 ' oommf_command = 'tclsh", "' oommf_command = 'tclsh ' + path_oommf + param_string + threads_string + mif_file", "'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/Skyrmion/skyrmionDome2.mif' length = 2 param_string = ' boxsi '#-parameters \"integer_length % s\" '", "= 'C:/Users/jmank/Desktop/oommf12b4_20200930_86_x64/oommf/Skyrmion/skyrmionDome2.mif' length = 2 param_string = ' boxsi '#-parameters \"integer_length % s\"", "param_string = ' boxsi '#-parameters \"integer_length % s\" ' % length threads_string =", "% length threads_string = ' -threads 28 ' oommf_command = 'tclsh ' +", "% s\" ' % length threads_string = ' -threads 28 ' oommf_command =", "length = 2 param_string = ' boxsi '#-parameters \"integer_length % s\" ' %" ]
[ "Any, Generator, Generic, Optional, Type, TypeVar, Union, ) import numpy as np from", "-> LocIndexer[B]: raise NotImplementedError() @abstractproperty def iloc(self: B) -> ILocIndexer[B]: raise NotImplementedError() @abstractmethod", "B = TypeVar(\"B\", bound=\"DataBackend\") class LocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[int, list, slice])", "import DataType from tanuki.database.data_token import DataToken if TYPE_CHECKING: from tanuki.data_store.index.index import Index from", "import ( TYPE_CHECKING, Any, Generator, Generic, Optional, Type, TypeVar, Union, ) import numpy", "NotImplementedError() @abstractproperty def iloc(self: B) -> ILocIndexer[B]: raise NotImplementedError() @abstractmethod def equals(self, other:", "item: list[str]) -> B: raise NotImplementedError() @abstractmethod def getmask(self, mask: list[bool]) -> B:", "def concat(cls: Type[B], all_backends: list[B], ignore_index: bool = False) -> B: raise NotImplementedError()", "abstractclassmethod, abstractmethod, abstractproperty from typing import ( TYPE_CHECKING, Any, Generator, Generic, Optional, Type,", "slice]) -> B: raise NotImplementedError() class DataBackend: @abstractmethod def is_link(self: B) -> bool:", "def getmask(self, mask: list[bool]) -> B: raise NotImplementedError() @abstractmethod def query(self, query: Query)", "Type, TypeVar, Union, ) import numpy as np from pandas import DataFrame from", "__future__ import annotations from abc import abstractclassmethod, abstractmethod, abstractproperty from typing import (", "Optional[DataToken]: raise NotImplementedError() @abstractmethod def to_pandas(self) -> DataFrame: raise NotImplementedError() @abstractproperty def columns(self)", "index_name(self) -> Union[str, list[str]]: raise NotImplementedError() @abstractproperty def loc(self: B) -> LocIndexer[B]: raise", "Index: raise NotImplementedError() @abstractmethod def set_index(self: B, index: Union[Index, IndexAlias]) -> B: raise", "np.ndarray: raise 
NotImplementedError() @abstractproperty def dtypes(self) -> dict[str, DataType]: raise NotImplementedError() @abstractmethod def", "-> Optional[DataToken]: raise NotImplementedError() @abstractmethod def to_pandas(self) -> DataFrame: raise NotImplementedError() @abstractproperty def", "annotations from abc import abstractclassmethod, abstractmethod, abstractproperty from typing import ( TYPE_CHECKING, Any,", "Type[B], all_backends: list[B], ignore_index: bool = False) -> B: raise NotImplementedError() @abstractmethod def", "link_token(self: B) -> Optional[DataToken]: raise NotImplementedError() @abstractmethod def to_pandas(self) -> DataFrame: raise NotImplementedError()", "list[str]) -> B: raise NotImplementedError() @abstractmethod def getmask(self, mask: list[bool]) -> B: raise", "int: raise NotImplementedError() @abstractmethod def __iter__(self) -> Generator[str, None, None]: raise NotImplementedError() @abstractmethod", "Index: raise NotImplementedError() @abstractproperty def index_name(self) -> Union[str, list[str]]: raise NotImplementedError() @abstractproperty def", "getitems(self, item: list[str]) -> B: raise NotImplementedError() @abstractmethod def getmask(self, mask: list[bool]) ->", "IndexAlias) -> Index: raise NotImplementedError() @abstractmethod def set_index(self: B, index: Union[Index, IndexAlias]) ->", "False) -> Generator[tuple, None, None]: raise NotImplementedError() @abstractmethod def __getitem__(self, item: Union[str, list[bool]])", "Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __ne__(self, other: Any) -> DataFrame: raise", "def __getitem__(self, item: Union[int, list, slice]) -> B: raise NotImplementedError() class ILocIndexer(Generic[B]): @abstractmethod", "raise NotImplementedError() @abstractmethod def __iter__(self) -> Generator[str, None, None]: raise NotImplementedError() @abstractmethod def", "iterrows(self) -> Generator[tuple[int, B], None, None]: raise NotImplementedError() @abstractmethod def 
itertuples(self, ignore_index: bool", "def index_name(self) -> Union[str, list[str]]: raise NotImplementedError() @abstractproperty def loc(self: B) -> LocIndexer[B]:", "-> ILocIndexer[B]: raise NotImplementedError() @abstractmethod def equals(self, other: Any) -> bool: raise NotImplementedError()", "NotImplementedError() @abstractmethod def get_index(self, index_alias: IndexAlias) -> Index: raise NotImplementedError() @abstractmethod def set_index(self:", "-> int: raise NotImplementedError() @abstractmethod def __iter__(self) -> Generator[str, None, None]: raise NotImplementedError()", "raise NotImplementedError() @abstractmethod def nunique(self: B) -> int: raise NotImplementedError() @abstractmethod def __str__(self:", "IndexAlias from tanuki.data_store.query import Query B = TypeVar(\"B\", bound=\"DataBackend\") class LocIndexer(Generic[B]): @abstractmethod def", "str, value: Any) -> None: raise NotImplementedError() @abstractmethod def get_index(self, index_alias: IndexAlias) ->", "from tanuki.database.data_token import DataToken if TYPE_CHECKING: from tanuki.data_store.index.index import Index from tanuki.data_store.index.index_alias import", "def iloc(self: B) -> ILocIndexer[B]: raise NotImplementedError() @abstractmethod def equals(self, other: Any) ->", "raise NotImplementedError() @abstractmethod def set_index(self: B, index: Union[Index, IndexAlias]) -> B: raise NotImplementedError()", "NotImplementedError() @abstractmethod def to_pandas(self) -> DataFrame: raise NotImplementedError() @abstractproperty def columns(self) -> list[str]:", "raise NotImplementedError() @abstractclassmethod def concat(cls: Type[B], all_backends: list[B], ignore_index: bool = False) ->", "B, new_backend: B, ignore_index: bool = False) -> B: raise NotImplementedError() @abstractmethod def", "NotImplementedError() @abstractmethod def __ne__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __gt__(self,", "NotImplementedError() @abstractmethod def 
__setitem__(self, item: str, value: Any) -> None: raise NotImplementedError() @abstractmethod", "B) -> str: raise NotImplementedError() @abstractmethod def __repr__(self: B) -> str: raise NotImplementedError()", "B) -> LocIndexer[B]: raise NotImplementedError() @abstractproperty def iloc(self: B) -> ILocIndexer[B]: raise NotImplementedError()", "from __future__ import annotations from abc import abstractclassmethod, abstractmethod, abstractproperty from typing import", "NotImplementedError() @abstractmethod def to_dict(self, orient) -> dict[str, any]: raise NotImplementedError() @abstractproperty def index(self)", "list[int]) -> B: raise NotImplementedError() @abstractclassmethod def concat(cls: Type[B], all_backends: list[B], ignore_index: bool", "tanuki.data_store.data_type import DataType from tanuki.database.data_token import DataToken if TYPE_CHECKING: from tanuki.data_store.index.index import Index", "def values(self) -> np.ndarray: raise NotImplementedError() @abstractproperty def dtypes(self) -> dict[str, DataType]: raise", "None]: raise NotImplementedError() @abstractmethod def __getitem__(self, item: Union[str, list[bool]]) -> Any: raise NotImplementedError()", "item: Union[str, list[bool]]) -> Any: raise NotImplementedError() @abstractmethod def getitems(self, item: list[str]) ->", "@abstractproperty def dtypes(self) -> dict[str, DataType]: raise NotImplementedError() @abstractmethod def cast_columns(self, column_dtypes: dict[str,", "def equals(self, other: Any) -> bool: raise NotImplementedError() @abstractmethod def __eq__(self, other: Any)", "def getitems(self, item: list[str]) -> B: raise NotImplementedError() @abstractmethod def getmask(self, mask: list[bool])", "Union[str, list[bool]]) -> Any: raise NotImplementedError() @abstractmethod def getitems(self, item: list[str]) -> B:", "raise NotImplementedError() @abstractmethod def link_token(self: B) -> Optional[DataToken]: raise NotImplementedError() @abstractmethod def to_pandas(self)", "= False) -> 
B: raise NotImplementedError() @abstractmethod def nunique(self: B) -> int: raise", ") import numpy as np from pandas import DataFrame from tanuki.data_store.data_type import DataType", "iloc(self: B) -> ILocIndexer[B]: raise NotImplementedError() @abstractmethod def equals(self, other: Any) -> bool:", "False) -> B: raise NotImplementedError() @abstractmethod def nunique(self: B) -> int: raise NotImplementedError()", "Union[int, list, slice]) -> B: raise NotImplementedError() class ILocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item:", "B: raise NotImplementedError() class ILocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[Any, list, slice]) ->", "raise NotImplementedError() class ILocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[Any, list, slice]) -> B:", "def to_pandas(self) -> DataFrame: raise NotImplementedError() @abstractproperty def columns(self) -> list[str]: raise NotImplementedError()", "LocIndexer[B]: raise NotImplementedError() @abstractproperty def iloc(self: B) -> ILocIndexer[B]: raise NotImplementedError() @abstractmethod def", "__getitem__(self, item: Union[int, list, slice]) -> B: raise NotImplementedError() class ILocIndexer(Generic[B]): @abstractmethod def", "NotImplementedError() @abstractclassmethod def concat(cls: Type[B], all_backends: list[B], ignore_index: bool = False) -> B:", "raise NotImplementedError() @abstractmethod def to_pandas(self) -> DataFrame: raise NotImplementedError() @abstractproperty def columns(self) ->", "@abstractmethod def to_pandas(self) -> DataFrame: raise NotImplementedError() @abstractproperty def columns(self) -> list[str]: raise", "other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __lt__(self, other: Any) -> DataFrame:", "raise NotImplementedError() @abstractmethod def __str__(self: B) -> str: raise NotImplementedError() @abstractmethod def __repr__(self:", "-> Any: raise NotImplementedError() @abstractmethod def 
getitems(self, item: list[str]) -> B: raise NotImplementedError()", "raise NotImplementedError() @abstractmethod def reset_index(self: B) -> B: raise NotImplementedError() @abstractmethod def append(self:", "raise NotImplementedError() @abstractmethod def __len__(self) -> int: raise NotImplementedError() @abstractmethod def __iter__(self) ->", "B: raise NotImplementedError() class DataBackend: @abstractmethod def is_link(self: B) -> bool: raise NotImplementedError()", "def to_dict(self, orient) -> dict[str, any]: raise NotImplementedError() @abstractproperty def index(self) -> Index:", "def iterrows(self) -> Generator[tuple[int, B], None, None]: raise NotImplementedError() @abstractmethod def itertuples(self, ignore_index:", "def __iter__(self) -> Generator[str, None, None]: raise NotImplementedError() @abstractmethod def iterrows(self) -> Generator[tuple[int,", "NotImplementedError() @abstractproperty def columns(self) -> list[str]: raise NotImplementedError() @abstractproperty def values(self) -> np.ndarray:", "DataToken if TYPE_CHECKING: from tanuki.data_store.index.index import Index from tanuki.data_store.index.index_alias import IndexAlias from tanuki.data_store.query", "class LocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[int, list, slice]) -> B: raise NotImplementedError()", "@abstractmethod def get_index(self, index_alias: IndexAlias) -> Index: raise NotImplementedError() @abstractmethod def set_index(self: B,", "NotImplementedError() @abstractmethod def __le__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __len__(self)", "bool: raise NotImplementedError() @abstractmethod def __eq__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod", "raise NotImplementedError() @abstractmethod def append(self: B, new_backend: B, ignore_index: bool = False) ->", "-> B: raise NotImplementedError() @abstractmethod def getmask(self, mask: list[bool]) -> B: raise NotImplementedError()", "-> 
B: raise NotImplementedError() class ILocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[Any, list, slice])", "@abstractmethod def __iter__(self) -> Generator[str, None, None]: raise NotImplementedError() @abstractmethod def iterrows(self) ->", "None, None]: raise NotImplementedError() @abstractmethod def __getitem__(self, item: Union[str, list[bool]]) -> Any: raise", "Union[str, list[str]]: raise NotImplementedError() @abstractproperty def loc(self: B) -> LocIndexer[B]: raise NotImplementedError() @abstractproperty", "Any) -> None: raise NotImplementedError() @abstractmethod def get_index(self, index_alias: IndexAlias) -> Index: raise", "list[str]]: raise NotImplementedError() @abstractproperty def loc(self: B) -> LocIndexer[B]: raise NotImplementedError() @abstractproperty def", "B, ignore_index: bool = False) -> B: raise NotImplementedError() @abstractmethod def drop_indices(self: B,", "def __getitem__(self, item: Union[str, list[bool]]) -> Any: raise NotImplementedError() @abstractmethod def getitems(self, item:", "drop_indices(self: B, indices: list[int]) -> B: raise NotImplementedError() @abstractclassmethod def concat(cls: Type[B], all_backends:", "bool = False) -> B: raise NotImplementedError() @abstractmethod def nunique(self: B) -> int:", "-> dict[str, any]: raise NotImplementedError() @abstractproperty def index(self) -> Index: raise NotImplementedError() @abstractproperty", "DataFrame: raise NotImplementedError() @abstractproperty def columns(self) -> list[str]: raise NotImplementedError() @abstractproperty def values(self)", "def link_token(self: B) -> Optional[DataToken]: raise NotImplementedError() @abstractmethod def to_pandas(self) -> DataFrame: raise", "def reset_index(self: B) -> B: raise NotImplementedError() @abstractmethod def append(self: B, new_backend: B,", "import numpy as np from pandas import DataFrame from tanuki.data_store.data_type import DataType from", "dict[str, DataType]: raise NotImplementedError() 
@abstractmethod def cast_columns(self, column_dtypes: dict[str, type]) -> DataBackend: raise", "if TYPE_CHECKING: from tanuki.data_store.index.index import Index from tanuki.data_store.index.index_alias import IndexAlias from tanuki.data_store.query import", "NotImplementedError() class ILocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[Any, list, slice]) -> B: raise", "raise NotImplementedError() @abstractmethod def itertuples(self, ignore_index: bool = False) -> Generator[tuple, None, None]:", "-> DataFrame: raise NotImplementedError() @abstractmethod def __len__(self) -> int: raise NotImplementedError() @abstractmethod def", "NotImplementedError() @abstractmethod def __len__(self) -> int: raise NotImplementedError() @abstractmethod def __iter__(self) -> Generator[str,", "LocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[int, list, slice]) -> B: raise NotImplementedError() class", "import IndexAlias from tanuki.data_store.query import Query B = TypeVar(\"B\", bound=\"DataBackend\") class LocIndexer(Generic[B]): @abstractmethod", "Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __gt__(self, other: Any) -> DataFrame: raise", "NotImplementedError() @abstractmethod def set_index(self: B, index: Union[Index, IndexAlias]) -> B: raise NotImplementedError() @abstractmethod", "item: Union[int, list, slice]) -> B: raise NotImplementedError() class ILocIndexer(Generic[B]): @abstractmethod def __getitem__(self,", "NotImplementedError() @abstractproperty def values(self) -> np.ndarray: raise NotImplementedError() @abstractproperty def dtypes(self) -> dict[str,", "from tanuki.data_store.index.index_alias import IndexAlias from tanuki.data_store.query import Query B = TypeVar(\"B\", bound=\"DataBackend\") class", "@abstractproperty def columns(self) -> list[str]: raise NotImplementedError() @abstractproperty def values(self) -> np.ndarray: raise", "Generator[str, None, None]: raise NotImplementedError() 
@abstractmethod def iterrows(self) -> Generator[tuple[int, B], None, None]:", "Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __len__(self) -> int: raise NotImplementedError() @abstractmethod", "NotImplementedError() @abstractmethod def itertuples(self, ignore_index: bool = False) -> Generator[tuple, None, None]: raise", "from abc import abstractclassmethod, abstractmethod, abstractproperty from typing import ( TYPE_CHECKING, Any, Generator,", "B: raise NotImplementedError() @abstractmethod def append(self: B, new_backend: B, ignore_index: bool = False)", "@abstractmethod def __eq__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __ne__(self, other:", "def __ge__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __lt__(self, other: Any)", "-> Generator[str, None, None]: raise NotImplementedError() @abstractmethod def iterrows(self) -> Generator[tuple[int, B], None,", "def index(self) -> Index: raise NotImplementedError() @abstractproperty def index_name(self) -> Union[str, list[str]]: raise", "nunique(self: B) -> int: raise NotImplementedError() @abstractmethod def __str__(self: B) -> str: raise", "pandas import DataFrame from tanuki.data_store.data_type import DataType from tanuki.database.data_token import DataToken if TYPE_CHECKING:", "def query(self, query: Query) -> B: raise NotImplementedError() @abstractmethod def __setitem__(self, item: str,", "NotImplementedError() @abstractmethod def link_token(self: B) -> Optional[DataToken]: raise NotImplementedError() @abstractmethod def to_pandas(self) ->", "other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __le__(self, other: Any) -> DataFrame:", "bound=\"DataBackend\") class LocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[int, list, slice]) -> B: raise", "-> DataFrame: raise NotImplementedError() @abstractmethod def __ne__(self, other: Any) -> DataFrame: raise NotImplementedError()", 
"np from pandas import DataFrame from tanuki.data_store.data_type import DataType from tanuki.database.data_token import DataToken", "DataType from tanuki.database.data_token import DataToken if TYPE_CHECKING: from tanuki.data_store.index.index import Index from tanuki.data_store.index.index_alias", "values(self) -> np.ndarray: raise NotImplementedError() @abstractproperty def dtypes(self) -> dict[str, DataType]: raise NotImplementedError()", "DataBackend: raise NotImplementedError() @abstractmethod def to_dict(self, orient) -> dict[str, any]: raise NotImplementedError() @abstractproperty", "-> bool: raise NotImplementedError() @abstractmethod def __eq__(self, other: Any) -> DataFrame: raise NotImplementedError()", "other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __len__(self) -> int: raise NotImplementedError()", "NotImplementedError() @abstractmethod def __gt__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __ge__(self,", "ignore_index: bool = False) -> B: raise NotImplementedError() @abstractmethod def drop_indices(self: B, indices:", "Union[Any, list, slice]) -> B: raise NotImplementedError() class DataBackend: @abstractmethod def is_link(self: B)", "@abstractmethod def __lt__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __le__(self, other:", "import DataToken if TYPE_CHECKING: from tanuki.data_store.index.index import Index from tanuki.data_store.index.index_alias import IndexAlias from", "def __gt__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __ge__(self, other: Any)", "@abstractmethod def reset_index(self: B) -> B: raise NotImplementedError() @abstractmethod def append(self: B, new_backend:", "NotImplementedError() @abstractmethod def drop_indices(self: B, indices: list[int]) -> B: raise NotImplementedError() @abstractclassmethod def", "list[str]: raise NotImplementedError() @abstractproperty def values(self) -> np.ndarray: raise 
NotImplementedError() @abstractproperty def dtypes(self)", "-> B: raise NotImplementedError() @abstractclassmethod def concat(cls: Type[B], all_backends: list[B], ignore_index: bool =", "ILocIndexer[B]: raise NotImplementedError() @abstractmethod def equals(self, other: Any) -> bool: raise NotImplementedError() @abstractmethod", "-> DataFrame: raise NotImplementedError() @abstractmethod def __gt__(self, other: Any) -> DataFrame: raise NotImplementedError()", "DataFrame: raise NotImplementedError() @abstractmethod def __le__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod", "new_backend: B, ignore_index: bool = False) -> B: raise NotImplementedError() @abstractmethod def drop_indices(self:", "-> Index: raise NotImplementedError() @abstractmethod def set_index(self: B, index: Union[Index, IndexAlias]) -> B:", "-> DataFrame: raise NotImplementedError() @abstractmethod def __ge__(self, other: Any) -> DataFrame: raise NotImplementedError()", "raise NotImplementedError() @abstractmethod def query(self, query: Query) -> B: raise NotImplementedError() @abstractmethod def", "any]: raise NotImplementedError() @abstractproperty def index(self) -> Index: raise NotImplementedError() @abstractproperty def index_name(self)", "@abstractmethod def append(self: B, new_backend: B, ignore_index: bool = False) -> B: raise", "column_dtypes: dict[str, type]) -> DataBackend: raise NotImplementedError() @abstractmethod def to_dict(self, orient) -> dict[str,", "value: Any) -> None: raise NotImplementedError() @abstractmethod def get_index(self, index_alias: IndexAlias) -> Index:", "raise NotImplementedError() @abstractmethod def drop_indices(self: B, indices: list[int]) -> B: raise NotImplementedError() @abstractclassmethod", "raise NotImplementedError() @abstractproperty def iloc(self: B) -> ILocIndexer[B]: raise NotImplementedError() @abstractmethod def equals(self,", "Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __ge__(self, other: Any) 
-> DataFrame: raise", "def loc(self: B) -> LocIndexer[B]: raise NotImplementedError() @abstractproperty def iloc(self: B) -> ILocIndexer[B]:", "def cast_columns(self, column_dtypes: dict[str, type]) -> DataBackend: raise NotImplementedError() @abstractmethod def to_dict(self, orient)", "Query B = TypeVar(\"B\", bound=\"DataBackend\") class LocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[int, list,", "getmask(self, mask: list[bool]) -> B: raise NotImplementedError() @abstractmethod def query(self, query: Query) ->", "NotImplementedError() @abstractproperty def index_name(self) -> Union[str, list[str]]: raise NotImplementedError() @abstractproperty def loc(self: B)", "NotImplementedError() @abstractmethod def __ge__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __lt__(self,", "Generator, Generic, Optional, Type, TypeVar, Union, ) import numpy as np from pandas", "@abstractmethod def is_link(self: B) -> bool: raise NotImplementedError() @abstractmethod def link_token(self: B) ->", "query: Query) -> B: raise NotImplementedError() @abstractmethod def __setitem__(self, item: str, value: Any)", "type]) -> DataBackend: raise NotImplementedError() @abstractmethod def to_dict(self, orient) -> dict[str, any]: raise", "indices: list[int]) -> B: raise NotImplementedError() @abstractclassmethod def concat(cls: Type[B], all_backends: list[B], ignore_index:", "@abstractmethod def __getitem__(self, item: Union[int, list, slice]) -> B: raise NotImplementedError() class ILocIndexer(Generic[B]):", "@abstractproperty def values(self) -> np.ndarray: raise NotImplementedError() @abstractproperty def dtypes(self) -> dict[str, DataType]:", "NotImplementedError() @abstractproperty def index(self) -> Index: raise NotImplementedError() @abstractproperty def index_name(self) -> Union[str,", "-> B: raise NotImplementedError() @abstractmethod def append(self: B, new_backend: B, ignore_index: bool =", "numpy as np from pandas import 
DataFrame from tanuki.data_store.data_type import DataType from tanuki.database.data_token", "-> B: raise NotImplementedError() @abstractmethod def nunique(self: B) -> int: raise NotImplementedError() @abstractmethod", "__ne__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __gt__(self, other: Any) ->", "int: raise NotImplementedError() @abstractmethod def __str__(self: B) -> str: raise NotImplementedError() @abstractmethod def", "cast_columns(self, column_dtypes: dict[str, type]) -> DataBackend: raise NotImplementedError() @abstractmethod def to_dict(self, orient) ->", "B) -> B: raise NotImplementedError() @abstractmethod def append(self: B, new_backend: B, ignore_index: bool", "list[bool]) -> B: raise NotImplementedError() @abstractmethod def query(self, query: Query) -> B: raise", "DataBackend: @abstractmethod def is_link(self: B) -> bool: raise NotImplementedError() @abstractmethod def link_token(self: B)", "orient) -> dict[str, any]: raise NotImplementedError() @abstractproperty def index(self) -> Index: raise NotImplementedError()", "-> DataFrame: raise NotImplementedError() @abstractmethod def __le__(self, other: Any) -> DataFrame: raise NotImplementedError()", "raise NotImplementedError() @abstractmethod def equals(self, other: Any) -> bool: raise NotImplementedError() @abstractmethod def", "-> DataFrame: raise NotImplementedError() @abstractproperty def columns(self) -> list[str]: raise NotImplementedError() @abstractproperty def", "@abstractmethod def set_index(self: B, index: Union[Index, IndexAlias]) -> B: raise NotImplementedError() @abstractmethod def", "raise NotImplementedError() @abstractmethod def __getitem__(self, item: Union[str, list[bool]]) -> Any: raise NotImplementedError() @abstractmethod", "DataType]: raise NotImplementedError() @abstractmethod def cast_columns(self, column_dtypes: dict[str, type]) -> DataBackend: raise NotImplementedError()", "raise NotImplementedError() @abstractmethod def __eq__(self, 
other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def", "Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __le__(self, other: Any) -> DataFrame: raise", "__len__(self) -> int: raise NotImplementedError() @abstractmethod def __iter__(self) -> Generator[str, None, None]: raise", "as np from pandas import DataFrame from tanuki.data_store.data_type import DataType from tanuki.database.data_token import", "@abstractproperty def iloc(self: B) -> ILocIndexer[B]: raise NotImplementedError() @abstractmethod def equals(self, other: Any)", "def set_index(self: B, index: Union[Index, IndexAlias]) -> B: raise NotImplementedError() @abstractmethod def reset_index(self:", "tanuki.data_store.index.index_alias import IndexAlias from tanuki.data_store.query import Query B = TypeVar(\"B\", bound=\"DataBackend\") class LocIndexer(Generic[B]):", "def columns(self) -> list[str]: raise NotImplementedError() @abstractproperty def values(self) -> np.ndarray: raise NotImplementedError()", "-> int: raise NotImplementedError() @abstractmethod def __str__(self: B) -> str: raise NotImplementedError() @abstractmethod", "@abstractmethod def __ne__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __gt__(self, other:", "None]: raise NotImplementedError() @abstractmethod def itertuples(self, ignore_index: bool = False) -> Generator[tuple, None,", "query(self, query: Query) -> B: raise NotImplementedError() @abstractmethod def __setitem__(self, item: str, value:", "ignore_index: bool = False) -> Generator[tuple, None, None]: raise NotImplementedError() @abstractmethod def __getitem__(self,", "@abstractproperty def index_name(self) -> Union[str, list[str]]: raise NotImplementedError() @abstractproperty def loc(self: B) ->", "Union[Index, IndexAlias]) -> B: raise NotImplementedError() @abstractmethod def reset_index(self: B) -> B: raise", "Optional, Type, TypeVar, Union, ) import numpy as np from pandas import DataFrame", "Any) -> 
bool: raise NotImplementedError() @abstractmethod def __eq__(self, other: Any) -> DataFrame: raise", "raise NotImplementedError() @abstractmethod def __le__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def", "@abstractproperty def loc(self: B) -> LocIndexer[B]: raise NotImplementedError() @abstractproperty def iloc(self: B) ->", "DataFrame: raise NotImplementedError() @abstractmethod def __ge__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod", "B) -> int: raise NotImplementedError() @abstractmethod def __str__(self: B) -> str: raise NotImplementedError()", "@abstractmethod def getmask(self, mask: list[bool]) -> B: raise NotImplementedError() @abstractmethod def query(self, query:", "typing import ( TYPE_CHECKING, Any, Generator, Generic, Optional, Type, TypeVar, Union, ) import", "@abstractmethod def cast_columns(self, column_dtypes: dict[str, type]) -> DataBackend: raise NotImplementedError() @abstractmethod def to_dict(self,", "other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __ne__(self, other: Any) -> DataFrame:", "list, slice]) -> B: raise NotImplementedError() class ILocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[Any,", "slice]) -> B: raise NotImplementedError() class ILocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[Any, list,", "@abstractmethod def query(self, query: Query) -> B: raise NotImplementedError() @abstractmethod def __setitem__(self, item:", "NotImplementedError() @abstractmethod def cast_columns(self, column_dtypes: dict[str, type]) -> DataBackend: raise NotImplementedError() @abstractmethod def", "mask: list[bool]) -> B: raise NotImplementedError() @abstractmethod def query(self, query: Query) -> B:", "raise NotImplementedError() @abstractmethod def getitems(self, item: list[str]) -> B: raise NotImplementedError() @abstractmethod def", "@abstractmethod def link_token(self: B) -> Optional[DataToken]: raise 
NotImplementedError() @abstractmethod def to_pandas(self) -> DataFrame:", "def __ne__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __gt__(self, other: Any)", "raise NotImplementedError() @abstractmethod def __ne__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def", "from pandas import DataFrame from tanuki.data_store.data_type import DataType from tanuki.database.data_token import DataToken if", "def append(self: B, new_backend: B, ignore_index: bool = False) -> B: raise NotImplementedError()", "NotImplementedError() @abstractmethod def reset_index(self: B) -> B: raise NotImplementedError() @abstractmethod def append(self: B,", "DataFrame: raise NotImplementedError() @abstractmethod def __ne__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod", "tanuki.data_store.query import Query B = TypeVar(\"B\", bound=\"DataBackend\") class LocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item:", "-> Generator[tuple, None, None]: raise NotImplementedError() @abstractmethod def __getitem__(self, item: Union[str, list[bool]]) ->", "-> bool: raise NotImplementedError() @abstractmethod def link_token(self: B) -> Optional[DataToken]: raise NotImplementedError() @abstractmethod", "abstractproperty from typing import ( TYPE_CHECKING, Any, Generator, Generic, Optional, Type, TypeVar, Union,", "= TypeVar(\"B\", bound=\"DataBackend\") class LocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[int, list, slice]) ->", "-> list[str]: raise NotImplementedError() @abstractproperty def values(self) -> np.ndarray: raise NotImplementedError() @abstractproperty def", "__gt__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __ge__(self, other: Any) ->", "raise NotImplementedError() @abstractmethod def get_index(self, index_alias: IndexAlias) -> Index: raise NotImplementedError() @abstractmethod def", "raise NotImplementedError() 
@abstractmethod def to_dict(self, orient) -> dict[str, any]: raise NotImplementedError() @abstractproperty def", "loc(self: B) -> LocIndexer[B]: raise NotImplementedError() @abstractproperty def iloc(self: B) -> ILocIndexer[B]: raise", "None, None]: raise NotImplementedError() @abstractmethod def itertuples(self, ignore_index: bool = False) -> Generator[tuple,", "columns(self) -> list[str]: raise NotImplementedError() @abstractproperty def values(self) -> np.ndarray: raise NotImplementedError() @abstractproperty", "list[B], ignore_index: bool = False) -> B: raise NotImplementedError() @abstractmethod def nunique(self: B)", "B) -> Optional[DataToken]: raise NotImplementedError() @abstractmethod def to_pandas(self) -> DataFrame: raise NotImplementedError() @abstractproperty", "Any: raise NotImplementedError() @abstractmethod def getitems(self, item: list[str]) -> B: raise NotImplementedError() @abstractmethod", "B: raise NotImplementedError() @abstractmethod def drop_indices(self: B, indices: list[int]) -> B: raise NotImplementedError()", "Generator[tuple, None, None]: raise NotImplementedError() @abstractmethod def __getitem__(self, item: Union[str, list[bool]]) -> Any:", "def __lt__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __le__(self, other: Any)", "def __eq__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __ne__(self, other: Any)", "NotImplementedError() class DataBackend: @abstractmethod def is_link(self: B) -> bool: raise NotImplementedError() @abstractmethod def", "index(self) -> Index: raise NotImplementedError() @abstractproperty def index_name(self) -> Union[str, list[str]]: raise NotImplementedError()", "False) -> B: raise NotImplementedError() @abstractmethod def drop_indices(self: B, indices: list[int]) -> B:", "tanuki.data_store.index.index import Index from tanuki.data_store.index.index_alias import IndexAlias from tanuki.data_store.query import Query B =", "-> B: raise 
NotImplementedError() @abstractmethod def query(self, query: Query) -> B: raise NotImplementedError()", "from typing import ( TYPE_CHECKING, Any, Generator, Generic, Optional, Type, TypeVar, Union, )", "raise NotImplementedError() @abstractproperty def dtypes(self) -> dict[str, DataType]: raise NotImplementedError() @abstractmethod def cast_columns(self,", "-> B: raise NotImplementedError() @abstractmethod def drop_indices(self: B, indices: list[int]) -> B: raise", "all_backends: list[B], ignore_index: bool = False) -> B: raise NotImplementedError() @abstractmethod def nunique(self:", "B: raise NotImplementedError() @abstractmethod def nunique(self: B) -> int: raise NotImplementedError() @abstractmethod def", "None: raise NotImplementedError() @abstractmethod def get_index(self, index_alias: IndexAlias) -> Index: raise NotImplementedError() @abstractmethod", "NotImplementedError() @abstractmethod def __iter__(self) -> Generator[str, None, None]: raise NotImplementedError() @abstractmethod def iterrows(self)", "@abstractmethod def equals(self, other: Any) -> bool: raise NotImplementedError() @abstractmethod def __eq__(self, other:", "def itertuples(self, ignore_index: bool = False) -> Generator[tuple, None, None]: raise NotImplementedError() @abstractmethod", "def __len__(self) -> int: raise NotImplementedError() @abstractmethod def __iter__(self) -> Generator[str, None, None]:", "@abstractmethod def __getitem__(self, item: Union[Any, list, slice]) -> B: raise NotImplementedError() class DataBackend:", "-> B: raise NotImplementedError() class DataBackend: @abstractmethod def is_link(self: B) -> bool: raise", "equals(self, other: Any) -> bool: raise NotImplementedError() @abstractmethod def __eq__(self, other: Any) ->", "dict[str, any]: raise NotImplementedError() @abstractproperty def index(self) -> Index: raise NotImplementedError() @abstractproperty def", "reset_index(self: B) -> B: raise NotImplementedError() @abstractmethod def append(self: B, new_backend: B, 
ignore_index:", "index: Union[Index, IndexAlias]) -> B: raise NotImplementedError() @abstractmethod def reset_index(self: B) -> B:", "concat(cls: Type[B], all_backends: list[B], ignore_index: bool = False) -> B: raise NotImplementedError() @abstractmethod", "from tanuki.data_store.index.index import Index from tanuki.data_store.index.index_alias import IndexAlias from tanuki.data_store.query import Query B", "from tanuki.data_store.query import Query B = TypeVar(\"B\", bound=\"DataBackend\") class LocIndexer(Generic[B]): @abstractmethod def __getitem__(self,", "Generator[tuple[int, B], None, None]: raise NotImplementedError() @abstractmethod def itertuples(self, ignore_index: bool = False)", "raise NotImplementedError() @abstractproperty def index_name(self) -> Union[str, list[str]]: raise NotImplementedError() @abstractproperty def loc(self:", "Generic, Optional, Type, TypeVar, Union, ) import numpy as np from pandas import", "def is_link(self: B) -> bool: raise NotImplementedError() @abstractmethod def link_token(self: B) -> Optional[DataToken]:", "TYPE_CHECKING: from tanuki.data_store.index.index import Index from tanuki.data_store.index.index_alias import IndexAlias from tanuki.data_store.query import Query", "bool = False) -> B: raise NotImplementedError() @abstractmethod def drop_indices(self: B, indices: list[int])", "= False) -> Generator[tuple, None, None]: raise NotImplementedError() @abstractmethod def __getitem__(self, item: Union[str,", "def __str__(self: B) -> str: raise NotImplementedError() @abstractmethod def __repr__(self: B) -> str:", "def __le__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __len__(self) -> int:", "__str__(self: B) -> str: raise NotImplementedError() @abstractmethod def __repr__(self: B) -> str: raise", "is_link(self: B) -> bool: raise NotImplementedError() @abstractmethod def link_token(self: B) -> Optional[DataToken]: raise", "@abstractmethod def __le__(self, other: Any) -> DataFrame: raise 
NotImplementedError() @abstractmethod def __len__(self) ->", "bool: raise NotImplementedError() @abstractmethod def link_token(self: B) -> Optional[DataToken]: raise NotImplementedError() @abstractmethod def", "DataFrame: raise NotImplementedError() @abstractmethod def __gt__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod", "tanuki.database.data_token import DataToken if TYPE_CHECKING: from tanuki.data_store.index.index import Index from tanuki.data_store.index.index_alias import IndexAlias", "raise NotImplementedError() @abstractproperty def index(self) -> Index: raise NotImplementedError() @abstractproperty def index_name(self) ->", "dtypes(self) -> dict[str, DataType]: raise NotImplementedError() @abstractmethod def cast_columns(self, column_dtypes: dict[str, type]) ->", "__ge__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __lt__(self, other: Any) ->", "def dtypes(self) -> dict[str, DataType]: raise NotImplementedError() @abstractmethod def cast_columns(self, column_dtypes: dict[str, type])", "raise NotImplementedError() @abstractproperty def columns(self) -> list[str]: raise NotImplementedError() @abstractproperty def values(self) ->", "raise NotImplementedError() @abstractmethod def __lt__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def", "@abstractmethod def drop_indices(self: B, indices: list[int]) -> B: raise NotImplementedError() @abstractclassmethod def concat(cls:", "item: Union[Any, list, slice]) -> B: raise NotImplementedError() class DataBackend: @abstractmethod def is_link(self:", "def __setitem__(self, item: str, value: Any) -> None: raise NotImplementedError() @abstractmethod def get_index(self,", "__getitem__(self, item: Union[Any, list, slice]) -> B: raise NotImplementedError() class DataBackend: @abstractmethod def", "item: str, value: Any) -> None: raise NotImplementedError() @abstractmethod def get_index(self, index_alias: IndexAlias)", 
"@abstractmethod def __gt__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __ge__(self, other:", "set_index(self: B, index: Union[Index, IndexAlias]) -> B: raise NotImplementedError() @abstractmethod def reset_index(self: B)", "B: raise NotImplementedError() @abstractmethod def getmask(self, mask: list[bool]) -> B: raise NotImplementedError() @abstractmethod", "NotImplementedError() @abstractmethod def append(self: B, new_backend: B, ignore_index: bool = False) -> B:", "abc import abstractclassmethod, abstractmethod, abstractproperty from typing import ( TYPE_CHECKING, Any, Generator, Generic,", "abstractmethod, abstractproperty from typing import ( TYPE_CHECKING, Any, Generator, Generic, Optional, Type, TypeVar,", "dict[str, type]) -> DataBackend: raise NotImplementedError() @abstractmethod def to_dict(self, orient) -> dict[str, any]:", "-> Union[str, list[str]]: raise NotImplementedError() @abstractproperty def loc(self: B) -> LocIndexer[B]: raise NotImplementedError()", "to_dict(self, orient) -> dict[str, any]: raise NotImplementedError() @abstractproperty def index(self) -> Index: raise", "import abstractclassmethod, abstractmethod, abstractproperty from typing import ( TYPE_CHECKING, Any, Generator, Generic, Optional,", "def __getitem__(self, item: Union[Any, list, slice]) -> B: raise NotImplementedError() class DataBackend: @abstractmethod", "-> Index: raise NotImplementedError() @abstractproperty def index_name(self) -> Union[str, list[str]]: raise NotImplementedError() @abstractproperty", "other: Any) -> bool: raise NotImplementedError() @abstractmethod def __eq__(self, other: Any) -> DataFrame:", "__le__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __len__(self) -> int: raise", "DataFrame: raise NotImplementedError() @abstractmethod def __len__(self) -> int: raise NotImplementedError() @abstractmethod def __iter__(self)", "__lt__(self, other: Any) -> DataFrame: raise NotImplementedError() 
@abstractmethod def __le__(self, other: Any) ->", "list[bool]]) -> Any: raise NotImplementedError() @abstractmethod def getitems(self, item: list[str]) -> B: raise", "DataFrame: raise NotImplementedError() @abstractmethod def __lt__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod", "B: raise NotImplementedError() @abstractmethod def __setitem__(self, item: str, value: Any) -> None: raise", "-> DataBackend: raise NotImplementedError() @abstractmethod def to_dict(self, orient) -> dict[str, any]: raise NotImplementedError()", "@abstractmethod def __getitem__(self, item: Union[str, list[bool]]) -> Any: raise NotImplementedError() @abstractmethod def getitems(self,", "import annotations from abc import abstractclassmethod, abstractmethod, abstractproperty from typing import ( TYPE_CHECKING,", "-> B: raise NotImplementedError() @abstractmethod def __setitem__(self, item: str, value: Any) -> None:", "raise NotImplementedError() class DataBackend: @abstractmethod def is_link(self: B) -> bool: raise NotImplementedError() @abstractmethod", "B: raise NotImplementedError() @abstractmethod def query(self, query: Query) -> B: raise NotImplementedError() @abstractmethod", "B) -> bool: raise NotImplementedError() @abstractmethod def link_token(self: B) -> Optional[DataToken]: raise NotImplementedError()", "None, None]: raise NotImplementedError() @abstractmethod def iterrows(self) -> Generator[tuple[int, B], None, None]: raise", "NotImplementedError() @abstractmethod def getitems(self, item: list[str]) -> B: raise NotImplementedError() @abstractmethod def getmask(self,", "NotImplementedError() @abstractproperty def dtypes(self) -> dict[str, DataType]: raise NotImplementedError() @abstractmethod def cast_columns(self, column_dtypes:", "index_alias: IndexAlias) -> Index: raise NotImplementedError() @abstractmethod def set_index(self: B, index: Union[Index, IndexAlias])", "__eq__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod 
def __ne__(self, other: Any) ->", "@abstractclassmethod def concat(cls: Type[B], all_backends: list[B], ignore_index: bool = False) -> B: raise", "class DataBackend: @abstractmethod def is_link(self: B) -> bool: raise NotImplementedError() @abstractmethod def link_token(self:", "bool = False) -> Generator[tuple, None, None]: raise NotImplementedError() @abstractmethod def __getitem__(self, item:", "B, index: Union[Index, IndexAlias]) -> B: raise NotImplementedError() @abstractmethod def reset_index(self: B) ->", "@abstractmethod def to_dict(self, orient) -> dict[str, any]: raise NotImplementedError() @abstractproperty def index(self) ->", "raise NotImplementedError() @abstractmethod def __gt__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def", "raise NotImplementedError() @abstractmethod def __ge__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def", "def get_index(self, index_alias: IndexAlias) -> Index: raise NotImplementedError() @abstractmethod def set_index(self: B, index:", "NotImplementedError() @abstractmethod def __eq__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __ne__(self,", "NotImplementedError() @abstractmethod def __getitem__(self, item: Union[str, list[bool]]) -> Any: raise NotImplementedError() @abstractmethod def", "raise NotImplementedError() @abstractmethod def __setitem__(self, item: str, value: Any) -> None: raise NotImplementedError()", "None]: raise NotImplementedError() @abstractmethod def iterrows(self) -> Generator[tuple[int, B], None, None]: raise NotImplementedError()", "def nunique(self: B) -> int: raise NotImplementedError() @abstractmethod def __str__(self: B) -> str:", "TYPE_CHECKING, Any, Generator, Generic, Optional, Type, TypeVar, Union, ) import numpy as np", "NotImplementedError() @abstractmethod def equals(self, other: Any) -> bool: raise NotImplementedError() @abstractmethod def __eq__(self,", "@abstractmethod def __setitem__(self, 
item: str, value: Any) -> None: raise NotImplementedError() @abstractmethod def", "raise NotImplementedError() @abstractmethod def cast_columns(self, column_dtypes: dict[str, type]) -> DataBackend: raise NotImplementedError() @abstractmethod", "__getitem__(self, item: Union[str, list[bool]]) -> Any: raise NotImplementedError() @abstractmethod def getitems(self, item: list[str])", "NotImplementedError() @abstractmethod def __str__(self: B) -> str: raise NotImplementedError() @abstractmethod def __repr__(self: B)", "@abstractmethod def nunique(self: B) -> int: raise NotImplementedError() @abstractmethod def __str__(self: B) ->", "@abstractproperty def index(self) -> Index: raise NotImplementedError() @abstractproperty def index_name(self) -> Union[str, list[str]]:", "append(self: B, new_backend: B, ignore_index: bool = False) -> B: raise NotImplementedError() @abstractmethod", "itertuples(self, ignore_index: bool = False) -> Generator[tuple, None, None]: raise NotImplementedError() @abstractmethod def", "list, slice]) -> B: raise NotImplementedError() class DataBackend: @abstractmethod def is_link(self: B) ->", "from tanuki.data_store.data_type import DataType from tanuki.database.data_token import DataToken if TYPE_CHECKING: from tanuki.data_store.index.index import", "@abstractmethod def __str__(self: B) -> str: raise NotImplementedError() @abstractmethod def __repr__(self: B) ->", "Union, ) import numpy as np from pandas import DataFrame from tanuki.data_store.data_type import", "NotImplementedError() @abstractproperty def loc(self: B) -> LocIndexer[B]: raise NotImplementedError() @abstractproperty def iloc(self: B)", "@abstractmethod def __ge__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __lt__(self, other:", "Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __lt__(self, other: Any) -> DataFrame: raise", "= False) -> B: raise NotImplementedError() @abstractmethod def drop_indices(self: B, indices: list[int]) 
->", "B: raise NotImplementedError() @abstractclassmethod def concat(cls: Type[B], all_backends: list[B], ignore_index: bool = False)", "raise NotImplementedError() @abstractmethod def getmask(self, mask: list[bool]) -> B: raise NotImplementedError() @abstractmethod def", "NotImplementedError() @abstractmethod def __lt__(self, other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __le__(self,", "raise NotImplementedError() @abstractproperty def loc(self: B) -> LocIndexer[B]: raise NotImplementedError() @abstractproperty def iloc(self:", "Query) -> B: raise NotImplementedError() @abstractmethod def __setitem__(self, item: str, value: Any) ->", "raise NotImplementedError() @abstractproperty def values(self) -> np.ndarray: raise NotImplementedError() @abstractproperty def dtypes(self) ->", "-> dict[str, DataType]: raise NotImplementedError() @abstractmethod def cast_columns(self, column_dtypes: dict[str, type]) -> DataBackend:", "B, indices: list[int]) -> B: raise NotImplementedError() @abstractclassmethod def concat(cls: Type[B], all_backends: list[B],", "@abstractmethod def getitems(self, item: list[str]) -> B: raise NotImplementedError() @abstractmethod def getmask(self, mask:", "( TYPE_CHECKING, Any, Generator, Generic, Optional, Type, TypeVar, Union, ) import numpy as", "import Index from tanuki.data_store.index.index_alias import IndexAlias from tanuki.data_store.query import Query B = TypeVar(\"B\",", "TypeVar(\"B\", bound=\"DataBackend\") class LocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[int, list, slice]) -> B:", "to_pandas(self) -> DataFrame: raise NotImplementedError() @abstractproperty def columns(self) -> list[str]: raise NotImplementedError() @abstractproperty", "ignore_index: bool = False) -> B: raise NotImplementedError() @abstractmethod def nunique(self: B) ->", "NotImplementedError() @abstractmethod def iterrows(self) -> Generator[tuple[int, B], None, None]: raise NotImplementedError() @abstractmethod 
def", "other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __gt__(self, other: Any) -> DataFrame:", "B) -> ILocIndexer[B]: raise NotImplementedError() @abstractmethod def equals(self, other: Any) -> bool: raise", "DataFrame from tanuki.data_store.data_type import DataType from tanuki.database.data_token import DataToken if TYPE_CHECKING: from tanuki.data_store.index.index", "B: raise NotImplementedError() @abstractmethod def reset_index(self: B) -> B: raise NotImplementedError() @abstractmethod def", "other: Any) -> DataFrame: raise NotImplementedError() @abstractmethod def __ge__(self, other: Any) -> DataFrame:", "__iter__(self) -> Generator[str, None, None]: raise NotImplementedError() @abstractmethod def iterrows(self) -> Generator[tuple[int, B],", "import DataFrame from tanuki.data_store.data_type import DataType from tanuki.database.data_token import DataToken if TYPE_CHECKING: from", "class ILocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[Any, list, slice]) -> B: raise NotImplementedError()", "raise NotImplementedError() @abstractmethod def iterrows(self) -> Generator[tuple[int, B], None, None]: raise NotImplementedError() @abstractmethod", "NotImplementedError() @abstractmethod def getmask(self, mask: list[bool]) -> B: raise NotImplementedError() @abstractmethod def query(self,", "@abstractmethod def itertuples(self, ignore_index: bool = False) -> Generator[tuple, None, None]: raise NotImplementedError()", "-> Generator[tuple[int, B], None, None]: raise NotImplementedError() @abstractmethod def itertuples(self, ignore_index: bool =", "import Query B = TypeVar(\"B\", bound=\"DataBackend\") class LocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[int,", "-> np.ndarray: raise NotImplementedError() @abstractproperty def dtypes(self) -> dict[str, DataType]: raise NotImplementedError() @abstractmethod", "__setitem__(self, item: str, value: Any) -> None: raise NotImplementedError() 
@abstractmethod def get_index(self, index_alias:", "def drop_indices(self: B, indices: list[int]) -> B: raise NotImplementedError() @abstractclassmethod def concat(cls: Type[B],", "ILocIndexer(Generic[B]): @abstractmethod def __getitem__(self, item: Union[Any, list, slice]) -> B: raise NotImplementedError() class", "-> B: raise NotImplementedError() @abstractmethod def reset_index(self: B) -> B: raise NotImplementedError() @abstractmethod", "@abstractmethod def __len__(self) -> int: raise NotImplementedError() @abstractmethod def __iter__(self) -> Generator[str, None,", "@abstractmethod def iterrows(self) -> Generator[tuple[int, B], None, None]: raise NotImplementedError() @abstractmethod def itertuples(self,", "NotImplementedError() @abstractmethod def nunique(self: B) -> int: raise NotImplementedError() @abstractmethod def __str__(self: B)", "get_index(self, index_alias: IndexAlias) -> Index: raise NotImplementedError() @abstractmethod def set_index(self: B, index: Union[Index,", "TypeVar, Union, ) import numpy as np from pandas import DataFrame from tanuki.data_store.data_type", "Index from tanuki.data_store.index.index_alias import IndexAlias from tanuki.data_store.query import Query B = TypeVar(\"B\", bound=\"DataBackend\")", "-> DataFrame: raise NotImplementedError() @abstractmethod def __lt__(self, other: Any) -> DataFrame: raise NotImplementedError()", "B], None, None]: raise NotImplementedError() @abstractmethod def itertuples(self, ignore_index: bool = False) ->", "NotImplementedError() @abstractmethod def query(self, query: Query) -> B: raise NotImplementedError() @abstractmethod def __setitem__(self,", "IndexAlias]) -> B: raise NotImplementedError() @abstractmethod def reset_index(self: B) -> B: raise NotImplementedError()", "-> None: raise NotImplementedError() @abstractmethod def get_index(self, index_alias: IndexAlias) -> Index: raise NotImplementedError()" ]
[ "statsout = os.path.join(outputdir, '{0}.bowtiestats.txt'.format(samplenames[idx])) command = ['bowtie2', '-q', '--end-to-end', '--fr', '--no-discordant', '--no-unal', '-p',", "idx + 1, len(samples))) forreads = os.path.join(readdir, '{0}.R1.trimmed.fq.gz'.format(sample)) revreads = os.path.join(readdir, '{0}.R2.trimmed.fq.gz'.format(sample)) samname", "outputdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments' for idx, sample in enumerate(samples): print('Aligning {0}, sample {1} of", "'CADNeuriteRep3', 'CADNeuriteRep4', 'CADNeuriteRep5', 'CADSomaRep1', 'CADSomaRep2', 'CADSomaRep3', 'CADSomaRep4', 'CADSomaRep5', 'N2ANeuriteRep1', 'N2ANeuriteRep2', 'N2ANeuriteRep3', 'N2ANeuriteRep4', 'N2ANeuriteRep5',", "'N2A_Soma_Rep5_S57'] samplenames = ['CADNeuriteRep1', 'CADNeuriteRep2', 'CADNeuriteRep3', 'CADNeuriteRep4', 'CADNeuriteRep5', 'CADSomaRep1', 'CADSomaRep2', 'CADSomaRep3', 'CADSomaRep4', 'CADSomaRep5',", "'CAD_Soma_Rep3_S65', 'CAD_Soma_Rep4_S66', 'CAD_Soma_Rep5_S67', 'N2A_Neurite_Rep1_S58', 'N2A_Neurite_Rep2_S59', 'N2A_Neurite_Rep3_S60', 'N2A_Neurite_Rep4_S61', 'N2A_Neurite_Rep5_S62', 'N2A_Soma_Rep1_S53', 'N2A_Soma_Rep2_S54', 'N2A_Soma_Rep3_S55', 'N2A_Soma_Rep4_S56', 'N2A_Soma_Rep5_S57']", "os.path.join(readdir, '{0}.R1.trimmed.fq.gz'.format(sample)) revreads = os.path.join(readdir, '{0}.R2.trimmed.fq.gz'.format(sample)) samname = os.path.join(outputdir, samplenames[idx] + '.sam') statsout", "import subprocess samples = ['CAD_Neurite_Rep1_S68', 'CAD_Neurite_Rep2_S69', 'CAD_Neurite_Rep3_S70', 'CAD_Neurite_Rep4_S71', 'CAD_Neurite_Rep5_S72', 'CAD_Soma_Rep1_S63', 'CAD_Soma_Rep2_S64', 'CAD_Soma_Rep3_S65', 'CAD_Soma_Rep4_S66',", "indexfile = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments/Bowtie2Index/mm10oligos' outputdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments' for idx, sample in enumerate(samples): print('Aligning {0},", "'-q', 
'--end-to-end', '--fr', '--no-discordant', '--no-unal', '-p', '8', '-x', indexfile, '-1', forreads, '-2', revreads,", "'N2A_Neurite_Rep1_S58', 'N2A_Neurite_Rep2_S59', 'N2A_Neurite_Rep3_S60', 'N2A_Neurite_Rep4_S61', 'N2A_Neurite_Rep5_S62', 'N2A_Soma_Rep1_S53', 'N2A_Soma_Rep2_S54', 'N2A_Soma_Rep3_S55', 'N2A_Soma_Rep4_S56', 'N2A_Soma_Rep5_S57'] samplenames = ['CADNeuriteRep1',", "'N2ANeuriteRep2', 'N2ANeuriteRep3', 'N2ANeuriteRep4', 'N2ANeuriteRep5', 'N2ASomaRep1', 'N2ASomaRep2', 'N2ASomaRep3', 'N2ASomaRep4', 'N2ASomaRep5'] readdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/RawReads/trimmed' indexfile", "of {2}...'.format(sample, idx + 1, len(samples))) forreads = os.path.join(readdir, '{0}.R1.trimmed.fq.gz'.format(sample)) revreads = os.path.join(readdir,", "'CAD_Soma_Rep5_S67', 'N2A_Neurite_Rep1_S58', 'N2A_Neurite_Rep2_S59', 'N2A_Neurite_Rep3_S60', 'N2A_Neurite_Rep4_S61', 'N2A_Neurite_Rep5_S62', 'N2A_Soma_Rep1_S53', 'N2A_Soma_Rep2_S54', 'N2A_Soma_Rep3_S55', 'N2A_Soma_Rep4_S56', 'N2A_Soma_Rep5_S57'] samplenames =", "in enumerate(samples): print('Aligning {0}, sample {1} of {2}...'.format(sample, idx + 1, len(samples))) forreads", "'N2ASomaRep4', 'N2ASomaRep5'] readdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/RawReads/trimmed' indexfile = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments/Bowtie2Index/mm10oligos' outputdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments' for idx,", "'N2A_Neurite_Rep5_S62', 'N2A_Soma_Rep1_S53', 'N2A_Soma_Rep2_S54', 'N2A_Soma_Rep3_S55', 'N2A_Soma_Rep4_S56', 'N2A_Soma_Rep5_S57'] samplenames = ['CADNeuriteRep1', 'CADNeuriteRep2', 'CADNeuriteRep3', 'CADNeuriteRep4', 'CADNeuriteRep5',", "= '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/RawReads/trimmed' indexfile = 
'/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments/Bowtie2Index/mm10oligos' outputdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments' for idx, sample in enumerate(samples):", "os.path.join(outputdir, samplenames[idx] + '.sam') statsout = os.path.join(outputdir, '{0}.bowtiestats.txt'.format(samplenames[idx])) command = ['bowtie2', '-q', '--end-to-end',", "'-p', '8', '-x', indexfile, '-1', forreads, '-2', revreads, '-S', samname] with open(statsout, 'w')", "'CAD_Neurite_Rep2_S69', 'CAD_Neurite_Rep3_S70', 'CAD_Neurite_Rep4_S71', 'CAD_Neurite_Rep5_S72', 'CAD_Soma_Rep1_S63', 'CAD_Soma_Rep2_S64', 'CAD_Soma_Rep3_S65', 'CAD_Soma_Rep4_S66', 'CAD_Soma_Rep5_S67', 'N2A_Neurite_Rep1_S58', 'N2A_Neurite_Rep2_S59', 'N2A_Neurite_Rep3_S60', 'N2A_Neurite_Rep4_S61',", "'/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments/Bowtie2Index/mm10oligos' outputdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments' for idx, sample in enumerate(samples): print('Aligning {0}, sample {1}", "command = ['bowtie2', '-q', '--end-to-end', '--fr', '--no-discordant', '--no-unal', '-p', '8', '-x', indexfile, '-1',", "'CAD_Neurite_Rep4_S71', 'CAD_Neurite_Rep5_S72', 'CAD_Soma_Rep1_S63', 'CAD_Soma_Rep2_S64', 'CAD_Soma_Rep3_S65', 'CAD_Soma_Rep4_S66', 'CAD_Soma_Rep5_S67', 'N2A_Neurite_Rep1_S58', 'N2A_Neurite_Rep2_S59', 'N2A_Neurite_Rep3_S60', 'N2A_Neurite_Rep4_S61', 'N2A_Neurite_Rep5_S62', 'N2A_Soma_Rep1_S53',", "os.path.join(readdir, '{0}.R2.trimmed.fq.gz'.format(sample)) samname = os.path.join(outputdir, samplenames[idx] + '.sam') statsout = os.path.join(outputdir, '{0}.bowtiestats.txt'.format(samplenames[idx])) command", "= ['bowtie2', '-q', '--end-to-end', '--fr', '--no-discordant', '--no-unal', '-p', '8', '-x', indexfile, '-1', forreads,", "indexfile, '-1', forreads, '-2', revreads, '-S', samname] with open(statsout, 'w') as outfh: 
subprocess.call(command,", "'{0}.R2.trimmed.fq.gz'.format(sample)) samname = os.path.join(outputdir, samplenames[idx] + '.sam') statsout = os.path.join(outputdir, '{0}.bowtiestats.txt'.format(samplenames[idx])) command =", "'--fr', '--no-discordant', '--no-unal', '-p', '8', '-x', indexfile, '-1', forreads, '-2', revreads, '-S', samname]", "'N2A_Soma_Rep2_S54', 'N2A_Soma_Rep3_S55', 'N2A_Soma_Rep4_S56', 'N2A_Soma_Rep5_S57'] samplenames = ['CADNeuriteRep1', 'CADNeuriteRep2', 'CADNeuriteRep3', 'CADNeuriteRep4', 'CADNeuriteRep5', 'CADSomaRep1', 'CADSomaRep2',", "'CAD_Neurite_Rep5_S72', 'CAD_Soma_Rep1_S63', 'CAD_Soma_Rep2_S64', 'CAD_Soma_Rep3_S65', 'CAD_Soma_Rep4_S66', 'CAD_Soma_Rep5_S67', 'N2A_Neurite_Rep1_S58', 'N2A_Neurite_Rep2_S59', 'N2A_Neurite_Rep3_S60', 'N2A_Neurite_Rep4_S61', 'N2A_Neurite_Rep5_S62', 'N2A_Soma_Rep1_S53', 'N2A_Soma_Rep2_S54',", "revreads = os.path.join(readdir, '{0}.R2.trimmed.fq.gz'.format(sample)) samname = os.path.join(outputdir, samplenames[idx] + '.sam') statsout = os.path.join(outputdir,", "print('Aligning {0}, sample {1} of {2}...'.format(sample, idx + 1, len(samples))) forreads = os.path.join(readdir,", "= ['CADNeuriteRep1', 'CADNeuriteRep2', 'CADNeuriteRep3', 'CADNeuriteRep4', 'CADNeuriteRep5', 'CADSomaRep1', 'CADSomaRep2', 'CADSomaRep3', 'CADSomaRep4', 'CADSomaRep5', 'N2ANeuriteRep1', 'N2ANeuriteRep2',", "= os.path.join(readdir, '{0}.R2.trimmed.fq.gz'.format(sample)) samname = os.path.join(outputdir, samplenames[idx] + '.sam') statsout = os.path.join(outputdir, '{0}.bowtiestats.txt'.format(samplenames[idx]))", "'N2ANeuriteRep1', 'N2ANeuriteRep2', 'N2ANeuriteRep3', 'N2ANeuriteRep4', 'N2ANeuriteRep5', 'N2ASomaRep1', 'N2ASomaRep2', 'N2ASomaRep3', 'N2ASomaRep4', 'N2ASomaRep5'] readdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/RawReads/trimmed'", "forreads = os.path.join(readdir, '{0}.R1.trimmed.fq.gz'.format(sample)) revreads = os.path.join(readdir, '{0}.R2.trimmed.fq.gz'.format(sample)) samname = 
os.path.join(outputdir, samplenames[idx] +", "'N2ASomaRep5'] readdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/RawReads/trimmed' indexfile = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments/Bowtie2Index/mm10oligos' outputdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments' for idx, sample", "'N2A_Neurite_Rep4_S61', 'N2A_Neurite_Rep5_S62', 'N2A_Soma_Rep1_S53', 'N2A_Soma_Rep2_S54', 'N2A_Soma_Rep3_S55', 'N2A_Soma_Rep4_S56', 'N2A_Soma_Rep5_S57'] samplenames = ['CADNeuriteRep1', 'CADNeuriteRep2', 'CADNeuriteRep3', 'CADNeuriteRep4',", "'N2ASomaRep3', 'N2ASomaRep4', 'N2ASomaRep5'] readdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/RawReads/trimmed' indexfile = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments/Bowtie2Index/mm10oligos' outputdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments' for", "len(samples))) forreads = os.path.join(readdir, '{0}.R1.trimmed.fq.gz'.format(sample)) revreads = os.path.join(readdir, '{0}.R2.trimmed.fq.gz'.format(sample)) samname = os.path.join(outputdir, samplenames[idx]", "'8', '-x', indexfile, '-1', forreads, '-2', revreads, '-S', samname] with open(statsout, 'w') as", "forreads, '-2', revreads, '-S', samname] with open(statsout, 'w') as outfh: subprocess.call(command, stderr =", "'--end-to-end', '--fr', '--no-discordant', '--no-unal', '-p', '8', '-x', indexfile, '-1', forreads, '-2', revreads, '-S',", "'N2ANeuriteRep3', 'N2ANeuriteRep4', 'N2ANeuriteRep5', 'N2ASomaRep1', 'N2ASomaRep2', 'N2ASomaRep3', 'N2ASomaRep4', 'N2ASomaRep5'] readdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/RawReads/trimmed' indexfile =", "['CADNeuriteRep1', 'CADNeuriteRep2', 'CADNeuriteRep3', 'CADNeuriteRep4', 'CADNeuriteRep5', 'CADSomaRep1', 'CADSomaRep2', 'CADSomaRep3', 'CADSomaRep4', 'CADSomaRep5', 
'N2ANeuriteRep1', 'N2ANeuriteRep2', 'N2ANeuriteRep3',", "'N2A_Soma_Rep1_S53', 'N2A_Soma_Rep2_S54', 'N2A_Soma_Rep3_S55', 'N2A_Soma_Rep4_S56', 'N2A_Soma_Rep5_S57'] samplenames = ['CADNeuriteRep1', 'CADNeuriteRep2', 'CADNeuriteRep3', 'CADNeuriteRep4', 'CADNeuriteRep5', 'CADSomaRep1',", "'N2ANeuriteRep5', 'N2ASomaRep1', 'N2ASomaRep2', 'N2ASomaRep3', 'N2ASomaRep4', 'N2ASomaRep5'] readdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/RawReads/trimmed' indexfile = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments/Bowtie2Index/mm10oligos' outputdir", "{0}, sample {1} of {2}...'.format(sample, idx + 1, len(samples))) forreads = os.path.join(readdir, '{0}.R1.trimmed.fq.gz'.format(sample))", "'.sam') statsout = os.path.join(outputdir, '{0}.bowtiestats.txt'.format(samplenames[idx])) command = ['bowtie2', '-q', '--end-to-end', '--fr', '--no-discordant', '--no-unal',", "+ '.sam') statsout = os.path.join(outputdir, '{0}.bowtiestats.txt'.format(samplenames[idx])) command = ['bowtie2', '-q', '--end-to-end', '--fr', '--no-discordant',", "for idx, sample in enumerate(samples): print('Aligning {0}, sample {1} of {2}...'.format(sample, idx +", "'CAD_Soma_Rep2_S64', 'CAD_Soma_Rep3_S65', 'CAD_Soma_Rep4_S66', 'CAD_Soma_Rep5_S67', 'N2A_Neurite_Rep1_S58', 'N2A_Neurite_Rep2_S59', 'N2A_Neurite_Rep3_S60', 'N2A_Neurite_Rep4_S61', 'N2A_Neurite_Rep5_S62', 'N2A_Soma_Rep1_S53', 'N2A_Soma_Rep2_S54', 'N2A_Soma_Rep3_S55', 'N2A_Soma_Rep4_S56',", "'CADNeuriteRep2', 'CADNeuriteRep3', 'CADNeuriteRep4', 'CADNeuriteRep5', 'CADSomaRep1', 'CADSomaRep2', 'CADSomaRep3', 'CADSomaRep4', 'CADSomaRep5', 'N2ANeuriteRep1', 'N2ANeuriteRep2', 'N2ANeuriteRep3', 'N2ANeuriteRep4',", "'-2', revreads, '-S', samname] with open(statsout, 'w') as outfh: subprocess.call(command, stderr = outfh)", "'CADSomaRep1', 'CADSomaRep2', 'CADSomaRep3', 'CADSomaRep4', 'CADSomaRep5', 'N2ANeuriteRep1', 'N2ANeuriteRep2', 'N2ANeuriteRep3', 'N2ANeuriteRep4', 'N2ANeuriteRep5', 
'N2ASomaRep1', 'N2ASomaRep2', 'N2ASomaRep3',", "= os.path.join(outputdir, '{0}.bowtiestats.txt'.format(samplenames[idx])) command = ['bowtie2', '-q', '--end-to-end', '--fr', '--no-discordant', '--no-unal', '-p', '8',", "subprocess samples = ['CAD_Neurite_Rep1_S68', 'CAD_Neurite_Rep2_S69', 'CAD_Neurite_Rep3_S70', 'CAD_Neurite_Rep4_S71', 'CAD_Neurite_Rep5_S72', 'CAD_Soma_Rep1_S63', 'CAD_Soma_Rep2_S64', 'CAD_Soma_Rep3_S65', 'CAD_Soma_Rep4_S66', 'CAD_Soma_Rep5_S67',", "readdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/RawReads/trimmed' indexfile = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments/Bowtie2Index/mm10oligos' outputdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments' for idx, sample in", "'N2A_Soma_Rep4_S56', 'N2A_Soma_Rep5_S57'] samplenames = ['CADNeuriteRep1', 'CADNeuriteRep2', 'CADNeuriteRep3', 'CADNeuriteRep4', 'CADNeuriteRep5', 'CADSomaRep1', 'CADSomaRep2', 'CADSomaRep3', 'CADSomaRep4',", "#python3 import os import subprocess samples = ['CAD_Neurite_Rep1_S68', 'CAD_Neurite_Rep2_S69', 'CAD_Neurite_Rep3_S70', 'CAD_Neurite_Rep4_S71', 'CAD_Neurite_Rep5_S72', 'CAD_Soma_Rep1_S63',", "enumerate(samples): print('Aligning {0}, sample {1} of {2}...'.format(sample, idx + 1, len(samples))) forreads =", "os.path.join(outputdir, '{0}.bowtiestats.txt'.format(samplenames[idx])) command = ['bowtie2', '-q', '--end-to-end', '--fr', '--no-discordant', '--no-unal', '-p', '8', '-x',", "'N2A_Soma_Rep3_S55', 'N2A_Soma_Rep4_S56', 'N2A_Soma_Rep5_S57'] samplenames = ['CADNeuriteRep1', 'CADNeuriteRep2', 'CADNeuriteRep3', 'CADNeuriteRep4', 'CADNeuriteRep5', 'CADSomaRep1', 'CADSomaRep2', 'CADSomaRep3',", "idx, sample in enumerate(samples): print('Aligning {0}, sample {1} of {2}...'.format(sample, idx + 1,", "samplenames[idx] + '.sam') statsout = os.path.join(outputdir, '{0}.bowtiestats.txt'.format(samplenames[idx])) command = ['bowtie2', '-q', '--end-to-end', '--fr',", 
"'CAD_Neurite_Rep3_S70', 'CAD_Neurite_Rep4_S71', 'CAD_Neurite_Rep5_S72', 'CAD_Soma_Rep1_S63', 'CAD_Soma_Rep2_S64', 'CAD_Soma_Rep3_S65', 'CAD_Soma_Rep4_S66', 'CAD_Soma_Rep5_S67', 'N2A_Neurite_Rep1_S58', 'N2A_Neurite_Rep2_S59', 'N2A_Neurite_Rep3_S60', 'N2A_Neurite_Rep4_S61', 'N2A_Neurite_Rep5_S62',", "'CADSomaRep2', 'CADSomaRep3', 'CADSomaRep4', 'CADSomaRep5', 'N2ANeuriteRep1', 'N2ANeuriteRep2', 'N2ANeuriteRep3', 'N2ANeuriteRep4', 'N2ANeuriteRep5', 'N2ASomaRep1', 'N2ASomaRep2', 'N2ASomaRep3', 'N2ASomaRep4',", "'CADNeuriteRep4', 'CADNeuriteRep5', 'CADSomaRep1', 'CADSomaRep2', 'CADSomaRep3', 'CADSomaRep4', 'CADSomaRep5', 'N2ANeuriteRep1', 'N2ANeuriteRep2', 'N2ANeuriteRep3', 'N2ANeuriteRep4', 'N2ANeuriteRep5', 'N2ASomaRep1',", "+ 1, len(samples))) forreads = os.path.join(readdir, '{0}.R1.trimmed.fq.gz'.format(sample)) revreads = os.path.join(readdir, '{0}.R2.trimmed.fq.gz'.format(sample)) samname =", "samplenames = ['CADNeuriteRep1', 'CADNeuriteRep2', 'CADNeuriteRep3', 'CADNeuriteRep4', 'CADNeuriteRep5', 'CADSomaRep1', 'CADSomaRep2', 'CADSomaRep3', 'CADSomaRep4', 'CADSomaRep5', 'N2ANeuriteRep1',", "'N2ANeuriteRep4', 'N2ANeuriteRep5', 'N2ASomaRep1', 'N2ASomaRep2', 'N2ASomaRep3', 'N2ASomaRep4', 'N2ASomaRep5'] readdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/RawReads/trimmed' indexfile = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments/Bowtie2Index/mm10oligos'", "'--no-unal', '-p', '8', '-x', indexfile, '-1', forreads, '-2', revreads, '-S', samname] with open(statsout,", "['CAD_Neurite_Rep1_S68', 'CAD_Neurite_Rep2_S69', 'CAD_Neurite_Rep3_S70', 'CAD_Neurite_Rep4_S71', 'CAD_Neurite_Rep5_S72', 'CAD_Soma_Rep1_S63', 'CAD_Soma_Rep2_S64', 'CAD_Soma_Rep3_S65', 'CAD_Soma_Rep4_S66', 'CAD_Soma_Rep5_S67', 'N2A_Neurite_Rep1_S58', 'N2A_Neurite_Rep2_S59', 'N2A_Neurite_Rep3_S60',", "= '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments/Bowtie2Index/mm10oligos' outputdir = 
'/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments' for idx, sample in enumerate(samples): print('Aligning {0}, sample", "= '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments' for idx, sample in enumerate(samples): print('Aligning {0}, sample {1} of {2}...'.format(sample,", "'CAD_Soma_Rep4_S66', 'CAD_Soma_Rep5_S67', 'N2A_Neurite_Rep1_S58', 'N2A_Neurite_Rep2_S59', 'N2A_Neurite_Rep3_S60', 'N2A_Neurite_Rep4_S61', 'N2A_Neurite_Rep5_S62', 'N2A_Soma_Rep1_S53', 'N2A_Soma_Rep2_S54', 'N2A_Soma_Rep3_S55', 'N2A_Soma_Rep4_S56', 'N2A_Soma_Rep5_S57'] samplenames", "<gh_stars>0 #python3 import os import subprocess samples = ['CAD_Neurite_Rep1_S68', 'CAD_Neurite_Rep2_S69', 'CAD_Neurite_Rep3_S70', 'CAD_Neurite_Rep4_S71', 'CAD_Neurite_Rep5_S72',", "sample {1} of {2}...'.format(sample, idx + 1, len(samples))) forreads = os.path.join(readdir, '{0}.R1.trimmed.fq.gz'.format(sample)) revreads", "1, len(samples))) forreads = os.path.join(readdir, '{0}.R1.trimmed.fq.gz'.format(sample)) revreads = os.path.join(readdir, '{0}.R2.trimmed.fq.gz'.format(sample)) samname = os.path.join(outputdir,", "'N2ASomaRep2', 'N2ASomaRep3', 'N2ASomaRep4', 'N2ASomaRep5'] readdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/RawReads/trimmed' indexfile = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments/Bowtie2Index/mm10oligos' outputdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments'", "'-1', forreads, '-2', revreads, '-S', samname] with open(statsout, 'w') as outfh: subprocess.call(command, stderr", "{1} of {2}...'.format(sample, idx + 1, len(samples))) forreads = os.path.join(readdir, '{0}.R1.trimmed.fq.gz'.format(sample)) revreads =", "samname = os.path.join(outputdir, samplenames[idx] + '.sam') statsout = os.path.join(outputdir, '{0}.bowtiestats.txt'.format(samplenames[idx])) command = ['bowtie2',", "os import subprocess 
samples = ['CAD_Neurite_Rep1_S68', 'CAD_Neurite_Rep2_S69', 'CAD_Neurite_Rep3_S70', 'CAD_Neurite_Rep4_S71', 'CAD_Neurite_Rep5_S72', 'CAD_Soma_Rep1_S63', 'CAD_Soma_Rep2_S64', 'CAD_Soma_Rep3_S65',", "'{0}.R1.trimmed.fq.gz'.format(sample)) revreads = os.path.join(readdir, '{0}.R2.trimmed.fq.gz'.format(sample)) samname = os.path.join(outputdir, samplenames[idx] + '.sam') statsout =", "'{0}.bowtiestats.txt'.format(samplenames[idx])) command = ['bowtie2', '-q', '--end-to-end', '--fr', '--no-discordant', '--no-unal', '-p', '8', '-x', indexfile,", "sample in enumerate(samples): print('Aligning {0}, sample {1} of {2}...'.format(sample, idx + 1, len(samples)))", "'CADSomaRep5', 'N2ANeuriteRep1', 'N2ANeuriteRep2', 'N2ANeuriteRep3', 'N2ANeuriteRep4', 'N2ANeuriteRep5', 'N2ASomaRep1', 'N2ASomaRep2', 'N2ASomaRep3', 'N2ASomaRep4', 'N2ASomaRep5'] readdir =", "'/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments' for idx, sample in enumerate(samples): print('Aligning {0}, sample {1} of {2}...'.format(sample, idx", "'CADNeuriteRep5', 'CADSomaRep1', 'CADSomaRep2', 'CADSomaRep3', 'CADSomaRep4', 'CADSomaRep5', 'N2ANeuriteRep1', 'N2ANeuriteRep2', 'N2ANeuriteRep3', 'N2ANeuriteRep4', 'N2ANeuriteRep5', 'N2ASomaRep1', 'N2ASomaRep2',", "'N2A_Neurite_Rep2_S59', 'N2A_Neurite_Rep3_S60', 'N2A_Neurite_Rep4_S61', 'N2A_Neurite_Rep5_S62', 'N2A_Soma_Rep1_S53', 'N2A_Soma_Rep2_S54', 'N2A_Soma_Rep3_S55', 'N2A_Soma_Rep4_S56', 'N2A_Soma_Rep5_S57'] samplenames = ['CADNeuriteRep1', 'CADNeuriteRep2',", "import os import subprocess samples = ['CAD_Neurite_Rep1_S68', 'CAD_Neurite_Rep2_S69', 'CAD_Neurite_Rep3_S70', 'CAD_Neurite_Rep4_S71', 'CAD_Neurite_Rep5_S72', 'CAD_Soma_Rep1_S63', 'CAD_Soma_Rep2_S64',", "'N2A_Neurite_Rep3_S60', 'N2A_Neurite_Rep4_S61', 'N2A_Neurite_Rep5_S62', 'N2A_Soma_Rep1_S53', 'N2A_Soma_Rep2_S54', 'N2A_Soma_Rep3_S55', 'N2A_Soma_Rep4_S56', 'N2A_Soma_Rep5_S57'] samplenames = ['CADNeuriteRep1', 'CADNeuriteRep2', 'CADNeuriteRep3',", "= os.path.join(readdir, 
'{0}.R1.trimmed.fq.gz'.format(sample)) revreads = os.path.join(readdir, '{0}.R2.trimmed.fq.gz'.format(sample)) samname = os.path.join(outputdir, samplenames[idx] + '.sam')", "'/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/RawReads/trimmed' indexfile = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments/Bowtie2Index/mm10oligos' outputdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments' for idx, sample in enumerate(samples): print('Aligning", "'-x', indexfile, '-1', forreads, '-2', revreads, '-S', samname] with open(statsout, 'w') as outfh:", "['bowtie2', '-q', '--end-to-end', '--fr', '--no-discordant', '--no-unal', '-p', '8', '-x', indexfile, '-1', forreads, '-2',", "'CADSomaRep4', 'CADSomaRep5', 'N2ANeuriteRep1', 'N2ANeuriteRep2', 'N2ANeuriteRep3', 'N2ANeuriteRep4', 'N2ANeuriteRep5', 'N2ASomaRep1', 'N2ASomaRep2', 'N2ASomaRep3', 'N2ASomaRep4', 'N2ASomaRep5'] readdir", "= ['CAD_Neurite_Rep1_S68', 'CAD_Neurite_Rep2_S69', 'CAD_Neurite_Rep3_S70', 'CAD_Neurite_Rep4_S71', 'CAD_Neurite_Rep5_S72', 'CAD_Soma_Rep1_S63', 'CAD_Soma_Rep2_S64', 'CAD_Soma_Rep3_S65', 'CAD_Soma_Rep4_S66', 'CAD_Soma_Rep5_S67', 'N2A_Neurite_Rep1_S58', 'N2A_Neurite_Rep2_S59',", "samples = ['CAD_Neurite_Rep1_S68', 'CAD_Neurite_Rep2_S69', 'CAD_Neurite_Rep3_S70', 'CAD_Neurite_Rep4_S71', 'CAD_Neurite_Rep5_S72', 'CAD_Soma_Rep1_S63', 'CAD_Soma_Rep2_S64', 'CAD_Soma_Rep3_S65', 'CAD_Soma_Rep4_S66', 'CAD_Soma_Rep5_S67', 'N2A_Neurite_Rep1_S58',", "= os.path.join(outputdir, samplenames[idx] + '.sam') statsout = os.path.join(outputdir, '{0}.bowtiestats.txt'.format(samplenames[idx])) command = ['bowtie2', '-q',", "'CAD_Soma_Rep1_S63', 'CAD_Soma_Rep2_S64', 'CAD_Soma_Rep3_S65', 'CAD_Soma_Rep4_S66', 'CAD_Soma_Rep5_S67', 'N2A_Neurite_Rep1_S58', 'N2A_Neurite_Rep2_S59', 'N2A_Neurite_Rep3_S60', 'N2A_Neurite_Rep4_S61', 'N2A_Neurite_Rep5_S62', 'N2A_Soma_Rep1_S53', 'N2A_Soma_Rep2_S54', 'N2A_Soma_Rep3_S55',", 
"'N2ASomaRep1', 'N2ASomaRep2', 'N2ASomaRep3', 'N2ASomaRep4', 'N2ASomaRep5'] readdir = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/RawReads/trimmed' indexfile = '/beevol/home/taliaferro/data/cisElementScreen/Fractionation/EqualRNAamount/Alignments/Bowtie2Index/mm10oligos' outputdir =", "{2}...'.format(sample, idx + 1, len(samples))) forreads = os.path.join(readdir, '{0}.R1.trimmed.fq.gz'.format(sample)) revreads = os.path.join(readdir, '{0}.R2.trimmed.fq.gz'.format(sample))", "'--no-discordant', '--no-unal', '-p', '8', '-x', indexfile, '-1', forreads, '-2', revreads, '-S', samname] with", "'CADSomaRep3', 'CADSomaRep4', 'CADSomaRep5', 'N2ANeuriteRep1', 'N2ANeuriteRep2', 'N2ANeuriteRep3', 'N2ANeuriteRep4', 'N2ANeuriteRep5', 'N2ASomaRep1', 'N2ASomaRep2', 'N2ASomaRep3', 'N2ASomaRep4', 'N2ASomaRep5']" ]
[ "Parameters ---------- dist : Distribution The distribution of interest. rv_x : iterable The", "from .. import gk_common_information from ...utils import unitful __all__ = [ 'no_communication_skar', ]", "to 'indices'. Returns ------- skar : float The no-communication secret key agreement rate.", "import gk_common_information from ...utils import unitful __all__ = [ 'no_communication_skar', ] @unitful def", "and no public communication. Parameters ---------- dist : Distribution The distribution of interest.", "defaults to 'indices'. Returns ------- skar : float The no-communication secret key agreement", "\"\"\" Secret Key Agreement Rate when communication is not permitted. \"\"\" from ..", "elements of `crvs` and `rvs` are interpreted as random variable indices. If equal", "iterable The indices to consider as the X variable, Alice. rv_y : iterable", "communication. Parameters ---------- dist : Distribution The distribution of interest. rv_x : iterable", "`crvs`. Valid options are: {'indices', 'names'}. If equal to 'indices', then the elements", "of interest. rv_x : iterable The indices to consider as the X variable,", "permitted. \"\"\" from .. import gk_common_information from ...utils import unitful __all__ = [", "Y can agree upon a key with Z eavesdropping, and no public communication.", "when communication is not permitted. \"\"\" from .. import gk_common_information from ...utils import", "`crvs` and `rvs` are interpreted as random variable indices. If equal to 'names',", "to consider as the Y variable, Bob. rv_z : iterable The indices to", "indices to consider as the Z variable, Eve. rv_mode : str, None Specifies", "can agree upon a key with Z eavesdropping, and no public communication. Parameters", "interest. rv_x : iterable The indices to consider as the X variable, Alice.", "upon a key with Z eavesdropping, and no public communication. Parameters ---------- dist", "are interpreted as random variable indices. 
If equal to 'names', the the elements", "The indices to consider as the Z variable, Eve. rv_mode : str, None", "variable, Eve. rv_mode : str, None Specifies how to interpret `rvs` and `crvs`.", "@unitful def no_communication_skar(dist, rv_x, rv_y, rv_z, rv_mode=None): \"\"\" The rate at which X", "at which X and Y can agree upon a key with Z eavesdropping,", ": iterable The indices to consider as the Y variable, Bob. rv_z :", "] @unitful def no_communication_skar(dist, rv_x, rv_y, rv_z, rv_mode=None): \"\"\" The rate at which", "Z eavesdropping, and no public communication. Parameters ---------- dist : Distribution The distribution", "Alice. rv_y : iterable The indices to consider as the Y variable, Bob.", "distribution of interest. rv_x : iterable The indices to consider as the X", "def no_communication_skar(dist, rv_x, rv_y, rv_z, rv_mode=None): \"\"\" The rate at which X and", "no_communication_skar(dist, rv_x, rv_y, rv_z, rv_mode=None): \"\"\" The rate at which X and Y", "'names'}. If equal to 'indices', then the elements of `crvs` and `rvs` are", "\"\"\" The rate at which X and Y can agree upon a key", "Secret Key Agreement Rate when communication is not permitted. \"\"\" from .. import", "public communication. Parameters ---------- dist : Distribution The distribution of interest. rv_x :", "dist : Distribution The distribution of interest. rv_x : iterable The indices to", "Eve. rv_mode : str, None Specifies how to interpret `rvs` and `crvs`. Valid", "variable indices. If equal to 'names', the the elements are interpreted as random", "then the value of `dist._rv_mode` is consulted, which defaults to 'indices'. Returns -------", "as random variable names. If `None`, then the value of `dist._rv_mode` is consulted,", "[ 'no_communication_skar', ] @unitful def no_communication_skar(dist, rv_x, rv_y, rv_z, rv_mode=None): \"\"\" The rate", "the elements are interpreted as random variable names. 
If `None`, then the value", "`rvs` are interpreted as random variable indices. If equal to 'names', the the", "If equal to 'indices', then the elements of `crvs` and `rvs` are interpreted", "rv_y : iterable The indices to consider as the Y variable, Bob. rv_z", "options are: {'indices', 'names'}. If equal to 'indices', then the elements of `crvs`", "communication is not permitted. \"\"\" from .. import gk_common_information from ...utils import unitful", "are interpreted as random variable names. If `None`, then the value of `dist._rv_mode`", "to 'indices', then the elements of `crvs` and `rvs` are interpreted as random", "consulted, which defaults to 'indices'. Returns ------- skar : float The no-communication secret", "consider as the X variable, Alice. rv_y : iterable The indices to consider", "which defaults to 'indices'. Returns ------- skar : float The no-communication secret key", ": str, None Specifies how to interpret `rvs` and `crvs`. Valid options are:", "The no-communication secret key agreement rate. \"\"\" return gk_common_information(dist, [rv_x, rv_y], rv_z, rv_mode=rv_mode)", "Returns ------- skar : float The no-communication secret key agreement rate. \"\"\" return", "agree upon a key with Z eavesdropping, and no public communication. Parameters ----------", "to interpret `rvs` and `crvs`. Valid options are: {'indices', 'names'}. If equal to", "and `rvs` are interpreted as random variable indices. If equal to 'names', the", "names. If `None`, then the value of `dist._rv_mode` is consulted, which defaults to", "which X and Y can agree upon a key with Z eavesdropping, and", "If `None`, then the value of `dist._rv_mode` is consulted, which defaults to 'indices'.", "import unitful __all__ = [ 'no_communication_skar', ] @unitful def no_communication_skar(dist, rv_x, rv_y, rv_z,", "the value of `dist._rv_mode` is consulted, which defaults to 'indices'. Returns ------- skar", "with Z eavesdropping, and no public communication. 
Parameters ---------- dist : Distribution The", "rate at which X and Y can agree upon a key with Z", "random variable indices. If equal to 'names', the the elements are interpreted as", "the Z variable, Eve. rv_mode : str, None Specifies how to interpret `rvs`", ": iterable The indices to consider as the Z variable, Eve. rv_mode :", "indices. If equal to 'names', the the elements are interpreted as random variable", "iterable The indices to consider as the Z variable, Eve. rv_mode : str,", "indices to consider as the Y variable, Bob. rv_z : iterable The indices", "from ...utils import unitful __all__ = [ 'no_communication_skar', ] @unitful def no_communication_skar(dist, rv_x,", "interpreted as random variable indices. If equal to 'names', the the elements are", "variable, Alice. rv_y : iterable The indices to consider as the Y variable,", "`rvs` and `crvs`. Valid options are: {'indices', 'names'}. If equal to 'indices', then", "Rate when communication is not permitted. \"\"\" from .. import gk_common_information from ...utils", "Valid options are: {'indices', 'names'}. If equal to 'indices', then the elements of", "eavesdropping, and no public communication. Parameters ---------- dist : Distribution The distribution of", "The indices to consider as the X variable, Alice. rv_y : iterable The", "`dist._rv_mode` is consulted, which defaults to 'indices'. Returns ------- skar : float The", "---------- dist : Distribution The distribution of interest. rv_x : iterable The indices", "__all__ = [ 'no_communication_skar', ] @unitful def no_communication_skar(dist, rv_x, rv_y, rv_z, rv_mode=None): \"\"\"", "The distribution of interest. rv_x : iterable The indices to consider as the", "The indices to consider as the Y variable, Bob. rv_z : iterable The", "as the X variable, Alice. rv_y : iterable The indices to consider as", "is consulted, which defaults to 'indices'. Returns ------- skar : float The no-communication", "Z variable, Eve. 
rv_mode : str, None Specifies how to interpret `rvs` and", ": float The no-communication secret key agreement rate. \"\"\" return gk_common_information(dist, [rv_x, rv_y],", "rv_mode : str, None Specifies how to interpret `rvs` and `crvs`. Valid options", "not permitted. \"\"\" from .. import gk_common_information from ...utils import unitful __all__ =", "indices to consider as the X variable, Alice. rv_y : iterable The indices", "the the elements are interpreted as random variable names. If `None`, then the", ": iterable The indices to consider as the X variable, Alice. rv_y :", "Agreement Rate when communication is not permitted. \"\"\" from .. import gk_common_information from", "and Y can agree upon a key with Z eavesdropping, and no public", "Key Agreement Rate when communication is not permitted. \"\"\" from .. import gk_common_information", "\"\"\" from .. import gk_common_information from ...utils import unitful __all__ = [ 'no_communication_skar',", "iterable The indices to consider as the Y variable, Bob. rv_z : iterable", "X variable, Alice. rv_y : iterable The indices to consider as the Y", "equal to 'names', the the elements are interpreted as random variable names. If", "to 'names', the the elements are interpreted as random variable names. If `None`,", "key with Z eavesdropping, and no public communication. Parameters ---------- dist : Distribution", "If equal to 'names', the the elements are interpreted as random variable names.", "variable names. If `None`, then the value of `dist._rv_mode` is consulted, which defaults", "equal to 'indices', then the elements of `crvs` and `rvs` are interpreted as", "of `dist._rv_mode` is consulted, which defaults to 'indices'. Returns ------- skar : float", "Bob. rv_z : iterable The indices to consider as the Z variable, Eve.", ": Distribution The distribution of interest. rv_x : iterable The indices to consider", "the X variable, Alice. 
rv_y : iterable The indices to consider as the", "then the elements of `crvs` and `rvs` are interpreted as random variable indices.", "'indices'. Returns ------- skar : float The no-communication secret key agreement rate. \"\"\"", "Y variable, Bob. rv_z : iterable The indices to consider as the Z", "the elements of `crvs` and `rvs` are interpreted as random variable indices. If", "'no_communication_skar', ] @unitful def no_communication_skar(dist, rv_x, rv_y, rv_z, rv_mode=None): \"\"\" The rate at", "of `crvs` and `rvs` are interpreted as random variable indices. If equal to", "Distribution The distribution of interest. rv_x : iterable The indices to consider as", "and `crvs`. Valid options are: {'indices', 'names'}. If equal to 'indices', then the", "'indices', then the elements of `crvs` and `rvs` are interpreted as random variable", "as the Z variable, Eve. rv_mode : str, None Specifies how to interpret", "to consider as the X variable, Alice. rv_y : iterable The indices to", "as the Y variable, Bob. rv_z : iterable The indices to consider as", "X and Y can agree upon a key with Z eavesdropping, and no", "consider as the Y variable, Bob. rv_z : iterable The indices to consider", ".. import gk_common_information from ...utils import unitful __all__ = [ 'no_communication_skar', ] @unitful", "------- skar : float The no-communication secret key agreement rate. \"\"\" return gk_common_information(dist,", "interpreted as random variable names. If `None`, then the value of `dist._rv_mode` is", "variable, Bob. rv_z : iterable The indices to consider as the Z variable,", "the Y variable, Bob. rv_z : iterable The indices to consider as the", "to consider as the Z variable, Eve. rv_mode : str, None Specifies how", "are: {'indices', 'names'}. If equal to 'indices', then the elements of `crvs` and", "random variable names. If `None`, then the value of `dist._rv_mode` is consulted, which", "how to interpret `rvs` and `crvs`. Valid options are: {'indices', 'names'}. 
If equal", "unitful __all__ = [ 'no_communication_skar', ] @unitful def no_communication_skar(dist, rv_x, rv_y, rv_z, rv_mode=None):", "`None`, then the value of `dist._rv_mode` is consulted, which defaults to 'indices'. Returns", "no public communication. Parameters ---------- dist : Distribution The distribution of interest. rv_x", "None Specifies how to interpret `rvs` and `crvs`. Valid options are: {'indices', 'names'}.", "skar : float The no-communication secret key agreement rate. \"\"\" return gk_common_information(dist, [rv_x,", "a key with Z eavesdropping, and no public communication. Parameters ---------- dist :", "value of `dist._rv_mode` is consulted, which defaults to 'indices'. Returns ------- skar :", "as random variable indices. If equal to 'names', the the elements are interpreted", "= [ 'no_communication_skar', ] @unitful def no_communication_skar(dist, rv_x, rv_y, rv_z, rv_mode=None): \"\"\" The", "rv_y, rv_z, rv_mode=None): \"\"\" The rate at which X and Y can agree", "rv_z : iterable The indices to consider as the Z variable, Eve. rv_mode", "rv_x : iterable The indices to consider as the X variable, Alice. rv_y", "{'indices', 'names'}. If equal to 'indices', then the elements of `crvs` and `rvs`", "is not permitted. \"\"\" from .. import gk_common_information from ...utils import unitful __all__", "Specifies how to interpret `rvs` and `crvs`. Valid options are: {'indices', 'names'}. If", "The rate at which X and Y can agree upon a key with", "rv_z, rv_mode=None): \"\"\" The rate at which X and Y can agree upon", "str, None Specifies how to interpret `rvs` and `crvs`. Valid options are: {'indices',", "rv_mode=None): \"\"\" The rate at which X and Y can agree upon a", "float The no-communication secret key agreement rate. \"\"\" return gk_common_information(dist, [rv_x, rv_y], rv_z,", "interpret `rvs` and `crvs`. Valid options are: {'indices', 'names'}. If equal to 'indices',", "'names', the the elements are interpreted as random variable names. 
If `None`, then", "rv_x, rv_y, rv_z, rv_mode=None): \"\"\" The rate at which X and Y can", "elements are interpreted as random variable names. If `None`, then the value of", "gk_common_information from ...utils import unitful __all__ = [ 'no_communication_skar', ] @unitful def no_communication_skar(dist,", "...utils import unitful __all__ = [ 'no_communication_skar', ] @unitful def no_communication_skar(dist, rv_x, rv_y,", "consider as the Z variable, Eve. rv_mode : str, None Specifies how to" ]
[ "s.recv(1024).decode().strip() print('[S] Received: ' + req) m = MSG_AUTH_RE.match(req) if (m is not", "'\\n')) getRequests += 1 else: reply = 'TIMEOUT' print('[S] Replying: ', reply) s.sendall(str.encode(reply", "= re.compile('''^GET +(.+) +(.+)''') MSG_RVK_RE = re.compile('''^RVK +(.+)''') def serve(srv): while 1: print('[S]", "MSG_AUTH_RE.match(req) if (m is not None): letters = string.ascii_letters token = ''.join(random.choice(letters) for", "+= 1 else: reply = 'TIMEOUT' print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n'))", "for i in range(20)) reply = 'SUCC ' + token print('[S] Replying: ',", "= False while (not auth): req = s.recv(1024).decode().strip() print('[S] Received: ' + req)", "def serve(srv): while 1: print('[S] Waiting for new connections') (s, address) = srv.accept()", "= s.recv(1024).decode().strip() print('[S] Received: ' + req) m_get = MSG_GET_RE.match(req) m_rvk = MSG_RVK_RE.match(req)", "= int(argv[1]) print('[S] Auth server starting. Press Ctrl+C to quit') srv = socket.socket(socket.AF_INET,", "'SUCC ' + token print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) auth =", "New connection from', address) handle_connection(s) print('[S] Closing connection') s.close() def handle_connection(s): print('[S] Waiting", "'TIMEOUT' print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) auth = False elif (m_rvk", "re, socket import random import string MSG_AUTH_RE = re.compile('''^AUTH +(.+) +(.+)''') MSG_GET_RE =", "'RES content' print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) getRequests += 1 else:", "new connections') (s, address) = srv.accept() print('[S] New connection from', address) handle_connection(s) print('[S]", "import string MSG_AUTH_RE = re.compile('''^AUTH +(.+) +(.+)''') MSG_GET_RE = re.compile('''^GET +(.+) +(.+)''') MSG_RVK_RE", "serve(srv): while 1: print('[S] Waiting for new connections') (s, address) = srv.accept() print('[S]", "= '127.0.0.1' SERVER_PORT = 1335 
MAX_GET_REQUESTS = 10 import re, socket import random", "None): letters = string.ascii_letters token = ''.join(random.choice(letters) for i in range(20)) reply =", "srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Avoid TIME_WAIT srv.bind((SERVER_HOST, SERVER_PORT)) print('[S]", "10 import re, socket import random import string MSG_AUTH_RE = re.compile('''^AUTH +(.+) +(.+)''')", "connection from', address) handle_connection(s) print('[S] Closing connection') s.close() def handle_connection(s): print('[S] Waiting for", "if (m is not None): letters = string.ascii_letters token = ''.join(random.choice(letters) for i", "SERVER_PORT = int(argv[1]) print('[S] Auth server starting. Press Ctrl+C to quit') srv =", "= True getRequests = 0 while(auth): req = s.recv(1024).decode().strip() print('[S] Received: ' +", "= s.recv(1024).decode().strip() print('[S] Received: ' + req) m = MSG_AUTH_RE.match(req) if (m is", "'127.0.0.1' SERVER_PORT = 1335 MAX_GET_REQUESTS = 10 import re, socket import random import", "= string.ascii_letters token = ''.join(random.choice(letters) for i in range(20)) reply = 'SUCC '", "reply) s.sendall(str.encode(reply + '\\n')) auth = True getRequests = 0 while(auth): req =", "random import string MSG_AUTH_RE = re.compile('''^AUTH +(.+) +(.+)''') MSG_GET_RE = re.compile('''^GET +(.+) +(.+)''')", "request') auth = False while (not auth): req = s.recv(1024).decode().strip() print('[S] Received: '", "message') if (__name__ == '__main__'): # SERVER_PORT = int(argv[1]) print('[S] Auth server starting.", "not None): if (getRequests < MAX_GET_REQUESTS): reply = 'RES content' print('[S] Replying: ',", "= re.compile('''^RVK +(.+)''') def serve(srv): while 1: print('[S] Waiting for new connections') (s,", "= ''.join(random.choice(letters) for i in range(20)) reply = 'SUCC ' + token print('[S]", "if (__name__ == '__main__'): # SERVER_PORT = int(argv[1]) print('[S] Auth server starting. 
Press", "= False elif (m_rvk is not None): auth = True break else: print('[S]", "handle_connection(s) print('[S] Closing connection') s.close() def handle_connection(s): print('[S] Waiting for request') auth =", "(m_rvk is not None): auth = True break else: print('[S] Invalid message') if", "address) handle_connection(s) print('[S] Closing connection') s.close() def handle_connection(s): print('[S] Waiting for request') auth", "s.sendall(str.encode(reply + '\\n')) auth = False elif (m_rvk is not None): auth =", "# SERVER_PORT = int(argv[1]) print('[S] Auth server starting. Press Ctrl+C to quit') srv", "= socket.socket(socket.AF_INET, socket.SOCK_STREAM) srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Avoid TIME_WAIT srv.bind((SERVER_HOST, SERVER_PORT)) print('[S] Listening", "= 10 import re, socket import random import string MSG_AUTH_RE = re.compile('''^AUTH +(.+)", "''.join(random.choice(letters) for i in range(20)) reply = 'SUCC ' + token print('[S] Replying:", "True getRequests = 0 while(auth): req = s.recv(1024).decode().strip() print('[S] Received: ' + req)", "0 while(auth): req = s.recv(1024).decode().strip() print('[S] Received: ' + req) m_get = MSG_GET_RE.match(req)", "None): auth = True break else: print('[S] Invalid message') if (__name__ == '__main__'):", "== '__main__'): # SERVER_PORT = int(argv[1]) print('[S] Auth server starting. 
Press Ctrl+C to", "= 'TIMEOUT' print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) auth = False elif", "m = MSG_AUTH_RE.match(req) if (m is not None): letters = string.ascii_letters token =", "(not auth): req = s.recv(1024).decode().strip() print('[S] Received: ' + req) m = MSG_AUTH_RE.match(req)", "req = s.recv(1024).decode().strip() print('[S] Received: ' + req) m_get = MSG_GET_RE.match(req) m_rvk =", "is not None): auth = True break else: print('[S] Invalid message') if (__name__", "+(.+)''') def serve(srv): while 1: print('[S] Waiting for new connections') (s, address) =", "to quit') srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Avoid TIME_WAIT srv.bind((SERVER_HOST,", "+(.+) +(.+)''') MSG_RVK_RE = re.compile('''^RVK +(.+)''') def serve(srv): while 1: print('[S] Waiting for", "print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) getRequests += 1 else: reply =", "Waiting for new connections') (s, address) = srv.accept() print('[S] New connection from', address)", "srv.accept() print('[S] New connection from', address) handle_connection(s) print('[S] Closing connection') s.close() def handle_connection(s):", "= 0 while(auth): req = s.recv(1024).decode().strip() print('[S] Received: ' + req) m_get =", "reply = 'SUCC ' + token print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n'))", "+ '\\n')) getRequests += 1 else: reply = 'TIMEOUT' print('[S] Replying: ', reply)", "+ '\\n')) auth = False elif (m_rvk is not None): auth = True", "is not None): letters = string.ascii_letters token = ''.join(random.choice(letters) for i in range(20))", "socket import random import string MSG_AUTH_RE = re.compile('''^AUTH +(.+) +(.+)''') MSG_GET_RE = re.compile('''^GET", "re.compile('''^RVK +(.+)''') def serve(srv): while 1: print('[S] Waiting for new connections') (s, address)", "range(20)) reply = 'SUCC ' + token print('[S] Replying: ', reply) s.sendall(str.encode(reply 
+", "auth = True getRequests = 0 while(auth): req = s.recv(1024).decode().strip() print('[S] Received: '", "None): if (getRequests < MAX_GET_REQUESTS): reply = 'RES content' print('[S] Replying: ', reply)", "', reply) s.sendall(str.encode(reply + '\\n')) getRequests += 1 else: reply = 'TIMEOUT' print('[S]", "', reply) s.sendall(str.encode(reply + '\\n')) auth = True getRequests = 0 while(auth): req", "= srv.accept() print('[S] New connection from', address) handle_connection(s) print('[S] Closing connection') s.close() def", "', reply) s.sendall(str.encode(reply + '\\n')) auth = False elif (m_rvk is not None):", "True break else: print('[S] Invalid message') if (__name__ == '__main__'): # SERVER_PORT =", "break else: print('[S] Invalid message') if (__name__ == '__main__'): # SERVER_PORT = int(argv[1])", "socket.SO_REUSEADDR, 1) # Avoid TIME_WAIT srv.bind((SERVER_HOST, SERVER_PORT)) print('[S] Listening on ', SERVER_HOST, SERVER_PORT)", "Avoid TIME_WAIT srv.bind((SERVER_HOST, SERVER_PORT)) print('[S] Listening on ', SERVER_HOST, SERVER_PORT) srv.listen(8) serve(srv) srv.close()", "print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) auth = True getRequests = 0", "= MSG_GET_RE.match(req) m_rvk = MSG_RVK_RE.match(req) if (m_get is not None): if (getRequests <", "+(.+) +(.+)''') MSG_GET_RE = re.compile('''^GET +(.+) +(.+)''') MSG_RVK_RE = re.compile('''^RVK +(.+)''') def serve(srv):", "1 else: reply = 'TIMEOUT' print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) auth", "else: print('[S] Invalid message') if (__name__ == '__main__'): # SERVER_PORT = int(argv[1]) print('[S]", "string MSG_AUTH_RE = re.compile('''^AUTH +(.+) +(.+)''') MSG_GET_RE = re.compile('''^GET +(.+) +(.+)''') MSG_RVK_RE =", "s.recv(1024).decode().strip() print('[S] Received: ' + req) m_get = MSG_GET_RE.match(req) m_rvk = MSG_RVK_RE.match(req) if", "= 'RES content' print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) getRequests += 1", "+ req) m_get = 
MSG_GET_RE.match(req) m_rvk = MSG_RVK_RE.match(req) if (m_get is not None):", "reply) s.sendall(str.encode(reply + '\\n')) getRequests += 1 else: reply = 'TIMEOUT' print('[S] Replying:", "print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) auth = False elif (m_rvk is", "token = ''.join(random.choice(letters) for i in range(20)) reply = 'SUCC ' + token", "srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Avoid TIME_WAIT srv.bind((SERVER_HOST, SERVER_PORT)) print('[S] Listening on ', SERVER_HOST,", "Replying: ', reply) s.sendall(str.encode(reply + '\\n')) getRequests += 1 else: reply = 'TIMEOUT'", "socket.SOCK_STREAM) srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Avoid TIME_WAIT srv.bind((SERVER_HOST, SERVER_PORT)) print('[S] Listening on ',", "while(auth): req = s.recv(1024).decode().strip() print('[S] Received: ' + req) m_get = MSG_GET_RE.match(req) m_rvk", "not None): letters = string.ascii_letters token = ''.join(random.choice(letters) for i in range(20)) reply", "Invalid message') if (__name__ == '__main__'): # SERVER_PORT = int(argv[1]) print('[S] Auth server", "handle_connection(s): print('[S] Waiting for request') auth = False while (not auth): req =", "MSG_RVK_RE = re.compile('''^RVK +(.+)''') def serve(srv): while 1: print('[S] Waiting for new connections')", "req = s.recv(1024).decode().strip() print('[S] Received: ' + req) m = MSG_AUTH_RE.match(req) if (m", "connection') s.close() def handle_connection(s): print('[S] Waiting for request') auth = False while (not", "= 'SUCC ' + token print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) auth", "'__main__'): # SERVER_PORT = int(argv[1]) print('[S] Auth server starting. 
Press Ctrl+C to quit')", "print('[S] Waiting for new connections') (s, address) = srv.accept() print('[S] New connection from',", "+(.+)''') MSG_RVK_RE = re.compile('''^RVK +(.+)''') def serve(srv): while 1: print('[S] Waiting for new", "print('[S] Waiting for request') auth = False while (not auth): req = s.recv(1024).decode().strip()", "auth = False elif (m_rvk is not None): auth = True break else:", "import re, socket import random import string MSG_AUTH_RE = re.compile('''^AUTH +(.+) +(.+)''') MSG_GET_RE", "address) = srv.accept() print('[S] New connection from', address) handle_connection(s) print('[S] Closing connection') s.close()", "i in range(20)) reply = 'SUCC ' + token print('[S] Replying: ', reply)", "# Avoid TIME_WAIT srv.bind((SERVER_HOST, SERVER_PORT)) print('[S] Listening on ', SERVER_HOST, SERVER_PORT) srv.listen(8) serve(srv)", "(m is not None): letters = string.ascii_letters token = ''.join(random.choice(letters) for i in", "not None): auth = True break else: print('[S] Invalid message') if (__name__ ==", "int(argv[1]) print('[S] Auth server starting. Press Ctrl+C to quit') srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "s.sendall(str.encode(reply + '\\n')) auth = True getRequests = 0 while(auth): req = s.recv(1024).decode().strip()", "= True break else: print('[S] Invalid message') if (__name__ == '__main__'): # SERVER_PORT", "(__name__ == '__main__'): # SERVER_PORT = int(argv[1]) print('[S] Auth server starting. Press Ctrl+C", "re.compile('''^GET +(.+) +(.+)''') MSG_RVK_RE = re.compile('''^RVK +(.+)''') def serve(srv): while 1: print('[S] Waiting", "= re.compile('''^AUTH +(.+) +(.+)''') MSG_GET_RE = re.compile('''^GET +(.+) +(.+)''') MSG_RVK_RE = re.compile('''^RVK +(.+)''')", "print('[S] Invalid message') if (__name__ == '__main__'): # SERVER_PORT = int(argv[1]) print('[S] Auth", "Replying: ', reply) s.sendall(str.encode(reply + '\\n')) auth = False elif (m_rvk is not", "starting. 
Press Ctrl+C to quit') srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #", "+(.+)''') MSG_GET_RE = re.compile('''^GET +(.+) +(.+)''') MSG_RVK_RE = re.compile('''^RVK +(.+)''') def serve(srv): while", "reply) s.sendall(str.encode(reply + '\\n')) auth = False elif (m_rvk is not None): auth", "'\\n')) auth = True getRequests = 0 while(auth): req = s.recv(1024).decode().strip() print('[S] Received:", "MSG_GET_RE.match(req) m_rvk = MSG_RVK_RE.match(req) if (m_get is not None): if (getRequests < MAX_GET_REQUESTS):", "while (not auth): req = s.recv(1024).decode().strip() print('[S] Received: ' + req) m =", "re.compile('''^AUTH +(.+) +(.+)''') MSG_GET_RE = re.compile('''^GET +(.+) +(.+)''') MSG_RVK_RE = re.compile('''^RVK +(.+)''') def", "1) # Avoid TIME_WAIT srv.bind((SERVER_HOST, SERVER_PORT)) print('[S] Listening on ', SERVER_HOST, SERVER_PORT) srv.listen(8)", "from', address) handle_connection(s) print('[S] Closing connection') s.close() def handle_connection(s): print('[S] Waiting for request')", "print('[S] Closing connection') s.close() def handle_connection(s): print('[S] Waiting for request') auth = False", "print('[S] New connection from', address) handle_connection(s) print('[S] Closing connection') s.close() def handle_connection(s): print('[S]", "quit') srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Avoid TIME_WAIT srv.bind((SERVER_HOST, SERVER_PORT))", "MSG_GET_RE = re.compile('''^GET +(.+) +(.+)''') MSG_RVK_RE = re.compile('''^RVK +(.+)''') def serve(srv): while 1:", "False while (not auth): req = s.recv(1024).decode().strip() print('[S] Received: ' + req) m", "server starting. 
Press Ctrl+C to quit') srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)", "token print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) auth = True getRequests =", "+ req) m = MSG_AUTH_RE.match(req) if (m is not None): letters = string.ascii_letters", "(m_get is not None): if (getRequests < MAX_GET_REQUESTS): reply = 'RES content' print('[S]", "import random import string MSG_AUTH_RE = re.compile('''^AUTH +(.+) +(.+)''') MSG_GET_RE = re.compile('''^GET +(.+)", "if (m_get is not None): if (getRequests < MAX_GET_REQUESTS): reply = 'RES content'", "reply = 'TIMEOUT' print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) auth = False", "if (getRequests < MAX_GET_REQUESTS): reply = 'RES content' print('[S] Replying: ', reply) s.sendall(str.encode(reply", "MSG_RVK_RE.match(req) if (m_get is not None): if (getRequests < MAX_GET_REQUESTS): reply = 'RES", "print('[S] Received: ' + req) m = MSG_AUTH_RE.match(req) if (m is not None):", "getRequests = 0 while(auth): req = s.recv(1024).decode().strip() print('[S] Received: ' + req) m_get", "+ token print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) auth = True getRequests", "auth): req = s.recv(1024).decode().strip() print('[S] Received: ' + req) m = MSG_AUTH_RE.match(req) if", "reply = 'RES content' print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) getRequests +=", "print('[S] Received: ' + req) m_get = MSG_GET_RE.match(req) m_rvk = MSG_RVK_RE.match(req) if (m_get", "letters = string.ascii_letters token = ''.join(random.choice(letters) for i in range(20)) reply = 'SUCC", "auth = True break else: print('[S] Invalid message') if (__name__ == '__main__'): #", "in range(20)) reply = 'SUCC ' + token print('[S] Replying: ', reply) s.sendall(str.encode(reply", "= 1335 MAX_GET_REQUESTS = 10 import re, socket import random import string MSG_AUTH_RE", "(s, address) = srv.accept() print('[S] New connection from', 
address) handle_connection(s) print('[S] Closing connection')", "m_rvk = MSG_RVK_RE.match(req) if (m_get is not None): if (getRequests < MAX_GET_REQUESTS): reply", "= MSG_RVK_RE.match(req) if (m_get is not None): if (getRequests < MAX_GET_REQUESTS): reply =", "socket.socket(socket.AF_INET, socket.SOCK_STREAM) srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Avoid TIME_WAIT srv.bind((SERVER_HOST, SERVER_PORT)) print('[S] Listening on", "Auth server starting. Press Ctrl+C to quit') srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,", "SERVER_HOST = '127.0.0.1' SERVER_PORT = 1335 MAX_GET_REQUESTS = 10 import re, socket import", "m_get = MSG_GET_RE.match(req) m_rvk = MSG_RVK_RE.match(req) if (m_get is not None): if (getRequests", "1335 MAX_GET_REQUESTS = 10 import re, socket import random import string MSG_AUTH_RE =", "content' print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) getRequests += 1 else: reply", "while 1: print('[S] Waiting for new connections') (s, address) = srv.accept() print('[S] New", "MSG_AUTH_RE = re.compile('''^AUTH +(.+) +(.+)''') MSG_GET_RE = re.compile('''^GET +(.+) +(.+)''') MSG_RVK_RE = re.compile('''^RVK", "' + token print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) auth = True", "for new connections') (s, address) = srv.accept() print('[S] New connection from', address) handle_connection(s)", "getRequests += 1 else: reply = 'TIMEOUT' print('[S] Replying: ', reply) s.sendall(str.encode(reply +", "elif (m_rvk is not None): auth = True break else: print('[S] Invalid message')", "Closing connection') s.close() def handle_connection(s): print('[S] Waiting for request') auth = False while", "SERVER_PORT = 1335 MAX_GET_REQUESTS = 10 import re, socket import random import string", "' + req) m_get = MSG_GET_RE.match(req) m_rvk = MSG_RVK_RE.match(req) if (m_get is not", "Received: ' + req) m_get = MSG_GET_RE.match(req) m_rvk = 
MSG_RVK_RE.match(req) if (m_get is", "for request') auth = False while (not auth): req = s.recv(1024).decode().strip() print('[S] Received:", "False elif (m_rvk is not None): auth = True break else: print('[S] Invalid", "(getRequests < MAX_GET_REQUESTS): reply = 'RES content' print('[S] Replying: ', reply) s.sendall(str.encode(reply +", "print('[S] Auth server starting. Press Ctrl+C to quit') srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) srv.setsockopt(socket.SOL_SOCKET,", "req) m = MSG_AUTH_RE.match(req) if (m is not None): letters = string.ascii_letters token", "Received: ' + req) m = MSG_AUTH_RE.match(req) if (m is not None): letters", "MAX_GET_REQUESTS): reply = 'RES content' print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n')) getRequests", "def handle_connection(s): print('[S] Waiting for request') auth = False while (not auth): req", "connections') (s, address) = srv.accept() print('[S] New connection from', address) handle_connection(s) print('[S] Closing", "Waiting for request') auth = False while (not auth): req = s.recv(1024).decode().strip() print('[S]", "' + req) m = MSG_AUTH_RE.match(req) if (m is not None): letters =", "Press Ctrl+C to quit') srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Avoid", "s.close() def handle_connection(s): print('[S] Waiting for request') auth = False while (not auth):", "= MSG_AUTH_RE.match(req) if (m is not None): letters = string.ascii_letters token = ''.join(random.choice(letters)", "MAX_GET_REQUESTS = 10 import re, socket import random import string MSG_AUTH_RE = re.compile('''^AUTH", "req) m_get = MSG_GET_RE.match(req) m_rvk = MSG_RVK_RE.match(req) if (m_get is not None): if", "is not None): if (getRequests < MAX_GET_REQUESTS): reply = 'RES content' print('[S] Replying:", "< MAX_GET_REQUESTS): reply = 'RES content' print('[S] Replying: ', reply) s.sendall(str.encode(reply + '\\n'))", "else: reply = 'TIMEOUT' print('[S] 
Replying: ', reply) s.sendall(str.encode(reply + '\\n')) auth =", "Replying: ', reply) s.sendall(str.encode(reply + '\\n')) auth = True getRequests = 0 while(auth):", "string.ascii_letters token = ''.join(random.choice(letters) for i in range(20)) reply = 'SUCC ' +", "1: print('[S] Waiting for new connections') (s, address) = srv.accept() print('[S] New connection", "'\\n')) auth = False elif (m_rvk is not None): auth = True break", "+ '\\n')) auth = True getRequests = 0 while(auth): req = s.recv(1024).decode().strip() print('[S]", "auth = False while (not auth): req = s.recv(1024).decode().strip() print('[S] Received: ' +", "Ctrl+C to quit') srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Avoid TIME_WAIT", "s.sendall(str.encode(reply + '\\n')) getRequests += 1 else: reply = 'TIMEOUT' print('[S] Replying: '," ]
[ "= socket.socket( socket.AF_UNIX, socket.SOCK_STREAM ) self.sock.connect(self.host) class UnixStreamTransport(xmlrpc.client.Transport, object): def __init__(self, socket_path): self.socket_path", "super().__init__() def make_connection(self, host): return UnixStreamHTTPConnection(self.socket_path) class UnixStreamXMLRPCClient(xmlrpc.client.ServerProxy): def __init__(self, addr, **kwargs): transport", "self.socket_path = socket_path super().__init__() def make_connection(self, host): return UnixStreamHTTPConnection(self.socket_path) class UnixStreamXMLRPCClient(xmlrpc.client.ServerProxy): def __init__(self,", "socket.socket( socket.AF_UNIX, socket.SOCK_STREAM ) self.sock.connect(self.host) class UnixStreamTransport(xmlrpc.client.Transport, object): def __init__(self, socket_path): self.socket_path =", "self.sock.connect(self.host) class UnixStreamTransport(xmlrpc.client.Transport, object): def __init__(self, socket_path): self.socket_path = socket_path super().__init__() def make_connection(self,", "return UnixStreamHTTPConnection(self.socket_path) class UnixStreamXMLRPCClient(xmlrpc.client.ServerProxy): def __init__(self, addr, **kwargs): transport = UnixStreamTransport(addr) super().__init__( \"http://\",", "<filename>utils/bundle/client.py<gh_stars>0 \"\"\" Adapted from https://gist.github.com/grantjenks/095de18c51fa8f118b68be80a624c45a \"\"\" import http.client import socket import xmlrpc.client class", "\"\"\" Adapted from https://gist.github.com/grantjenks/095de18c51fa8f118b68be80a624c45a \"\"\" import http.client import socket import xmlrpc.client class UnixStreamHTTPConnection(http.client.HTTPConnection):", "class UnixStreamTransport(xmlrpc.client.Transport, object): def __init__(self, socket_path): self.socket_path = socket_path super().__init__() def make_connection(self, host):", "socket import xmlrpc.client class UnixStreamHTTPConnection(http.client.HTTPConnection): def connect(self): self.sock = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM )", 
"http.client import socket import xmlrpc.client class UnixStreamHTTPConnection(http.client.HTTPConnection): def connect(self): self.sock = socket.socket( socket.AF_UNIX,", "xmlrpc.client class UnixStreamHTTPConnection(http.client.HTTPConnection): def connect(self): self.sock = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM ) self.sock.connect(self.host) class", "https://gist.github.com/grantjenks/095de18c51fa8f118b68be80a624c45a \"\"\" import http.client import socket import xmlrpc.client class UnixStreamHTTPConnection(http.client.HTTPConnection): def connect(self): self.sock", "import http.client import socket import xmlrpc.client class UnixStreamHTTPConnection(http.client.HTTPConnection): def connect(self): self.sock = socket.socket(", "from https://gist.github.com/grantjenks/095de18c51fa8f118b68be80a624c45a \"\"\" import http.client import socket import xmlrpc.client class UnixStreamHTTPConnection(http.client.HTTPConnection): def connect(self):", "UnixStreamXMLRPCClient(xmlrpc.client.ServerProxy): def __init__(self, addr, **kwargs): transport = UnixStreamTransport(addr) super().__init__( \"http://\", transport=transport, **kwargs )", ") self.sock.connect(self.host) class UnixStreamTransport(xmlrpc.client.Transport, object): def __init__(self, socket_path): self.socket_path = socket_path super().__init__() def", "host): return UnixStreamHTTPConnection(self.socket_path) class UnixStreamXMLRPCClient(xmlrpc.client.ServerProxy): def __init__(self, addr, **kwargs): transport = UnixStreamTransport(addr) super().__init__(", "UnixStreamHTTPConnection(self.socket_path) class UnixStreamXMLRPCClient(xmlrpc.client.ServerProxy): def __init__(self, addr, **kwargs): transport = UnixStreamTransport(addr) super().__init__( \"http://\", transport=transport,", "socket_path super().__init__() def make_connection(self, host): return UnixStreamHTTPConnection(self.socket_path) class UnixStreamXMLRPCClient(xmlrpc.client.ServerProxy): def __init__(self, addr, **kwargs):", 
"socket.AF_UNIX, socket.SOCK_STREAM ) self.sock.connect(self.host) class UnixStreamTransport(xmlrpc.client.Transport, object): def __init__(self, socket_path): self.socket_path = socket_path", "object): def __init__(self, socket_path): self.socket_path = socket_path super().__init__() def make_connection(self, host): return UnixStreamHTTPConnection(self.socket_path)", "import xmlrpc.client class UnixStreamHTTPConnection(http.client.HTTPConnection): def connect(self): self.sock = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM ) self.sock.connect(self.host)", "def __init__(self, socket_path): self.socket_path = socket_path super().__init__() def make_connection(self, host): return UnixStreamHTTPConnection(self.socket_path) class", "__init__(self, socket_path): self.socket_path = socket_path super().__init__() def make_connection(self, host): return UnixStreamHTTPConnection(self.socket_path) class UnixStreamXMLRPCClient(xmlrpc.client.ServerProxy):", "make_connection(self, host): return UnixStreamHTTPConnection(self.socket_path) class UnixStreamXMLRPCClient(xmlrpc.client.ServerProxy): def __init__(self, addr, **kwargs): transport = UnixStreamTransport(addr)", "socket_path): self.socket_path = socket_path super().__init__() def make_connection(self, host): return UnixStreamHTTPConnection(self.socket_path) class UnixStreamXMLRPCClient(xmlrpc.client.ServerProxy): def", "socket.SOCK_STREAM ) self.sock.connect(self.host) class UnixStreamTransport(xmlrpc.client.Transport, object): def __init__(self, socket_path): self.socket_path = socket_path super().__init__()", "= socket_path super().__init__() def make_connection(self, host): return UnixStreamHTTPConnection(self.socket_path) class UnixStreamXMLRPCClient(xmlrpc.client.ServerProxy): def __init__(self, addr,", "connect(self): self.sock = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM ) self.sock.connect(self.host) class UnixStreamTransport(xmlrpc.client.Transport, object): def __init__(self,", 
"UnixStreamTransport(xmlrpc.client.Transport, object): def __init__(self, socket_path): self.socket_path = socket_path super().__init__() def make_connection(self, host): return", "def make_connection(self, host): return UnixStreamHTTPConnection(self.socket_path) class UnixStreamXMLRPCClient(xmlrpc.client.ServerProxy): def __init__(self, addr, **kwargs): transport =", "\"\"\" import http.client import socket import xmlrpc.client class UnixStreamHTTPConnection(http.client.HTTPConnection): def connect(self): self.sock =", "Adapted from https://gist.github.com/grantjenks/095de18c51fa8f118b68be80a624c45a \"\"\" import http.client import socket import xmlrpc.client class UnixStreamHTTPConnection(http.client.HTTPConnection): def", "class UnixStreamHTTPConnection(http.client.HTTPConnection): def connect(self): self.sock = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM ) self.sock.connect(self.host) class UnixStreamTransport(xmlrpc.client.Transport,", "import socket import xmlrpc.client class UnixStreamHTTPConnection(http.client.HTTPConnection): def connect(self): self.sock = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM", "class UnixStreamXMLRPCClient(xmlrpc.client.ServerProxy): def __init__(self, addr, **kwargs): transport = UnixStreamTransport(addr) super().__init__( \"http://\", transport=transport, **kwargs", "self.sock = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM ) self.sock.connect(self.host) class UnixStreamTransport(xmlrpc.client.Transport, object): def __init__(self, socket_path):", "UnixStreamHTTPConnection(http.client.HTTPConnection): def connect(self): self.sock = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM ) self.sock.connect(self.host) class UnixStreamTransport(xmlrpc.client.Transport, object):", "def connect(self): self.sock = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM ) self.sock.connect(self.host) class UnixStreamTransport(xmlrpc.client.Transport, object): def" ]
[ "class StateVersionException(Exception): pass def _get_state(): with open(state_file_path, 'r') as state_file: data = json.load(state_file)", "= 0 class StateVersionException(Exception): pass def _get_state(): with open(state_file_path, 'r') as state_file: data", "def _get_state(): with open(state_file_path, 'r') as state_file: data = json.load(state_file) if data['version'] ==", "data.pop('version', None) return data else: raise StateVersionException(f\"No logic to parse state with version:", "set_state_value(self, key, value): self.state[key] = value def get_value(self, key): return self.state[key] def __exit__(self,", "state_file: json.dump(dc, state_file, sort_keys=True, indent=4) class State: def __init__(self): self.state = _get_state() pass", "json state_file_path = \"resources/state.json\" version = 0 class StateVersionException(Exception): pass def _get_state(): with", "version = 0 class StateVersionException(Exception): pass def _get_state(): with open(state_file_path, 'r') as state_file:", "raise StateVersionException(f\"No logic to parse state with version: {version} implemented\") def set_state(d): dc", "json.dump(dc, state_file, sort_keys=True, indent=4) class State: def __init__(self): self.state = _get_state() pass def", "0 class StateVersionException(Exception): pass def _get_state(): with open(state_file_path, 'r') as state_file: data =", "= value def get_value(self, key): return self.state[key] def __exit__(self, exc_type, exc_val, exc_tb): set_state(self.state)", "'w') as state_file: json.dump(dc, state_file, sort_keys=True, indent=4) class State: def __init__(self): self.state =", "value): self.state[key] = value def get_value(self, key): return self.state[key] def __exit__(self, exc_type, exc_val,", "{version} implemented\") def set_state(d): dc = d.copy() dc['version'] = version with open(state_file_path, 'w')", "== 0: data.pop('version', None) return data else: raise StateVersionException(f\"No logic to parse state", "'r') as state_file: 
data = json.load(state_file) if data['version'] == 0: data.pop('version', None) return", "= \"resources/state.json\" version = 0 class StateVersionException(Exception): pass def _get_state(): with open(state_file_path, 'r')", "class State: def __init__(self): self.state = _get_state() pass def set_state_value(self, key, value): self.state[key]", "= json.load(state_file) if data['version'] == 0: data.pop('version', None) return data else: raise StateVersionException(f\"No", "to parse state with version: {version} implemented\") def set_state(d): dc = d.copy() dc['version']", "version: {version} implemented\") def set_state(d): dc = d.copy() dc['version'] = version with open(state_file_path,", "\"resources/state.json\" version = 0 class StateVersionException(Exception): pass def _get_state(): with open(state_file_path, 'r') as", "data else: raise StateVersionException(f\"No logic to parse state with version: {version} implemented\") def", "dc = d.copy() dc['version'] = version with open(state_file_path, 'w') as state_file: json.dump(dc, state_file,", "pass def _get_state(): with open(state_file_path, 'r') as state_file: data = json.load(state_file) if data['version']", "= version with open(state_file_path, 'w') as state_file: json.dump(dc, state_file, sort_keys=True, indent=4) class State:", "key, value): self.state[key] = value def get_value(self, key): return self.state[key] def __exit__(self, exc_type,", "open(state_file_path, 'r') as state_file: data = json.load(state_file) if data['version'] == 0: data.pop('version', None)", "set_state(d): dc = d.copy() dc['version'] = version with open(state_file_path, 'w') as state_file: json.dump(dc,", "dc['version'] = version with open(state_file_path, 'w') as state_file: json.dump(dc, state_file, sort_keys=True, indent=4) class", "= _get_state() pass def set_state_value(self, key, value): self.state[key] = value def get_value(self, key):", "_get_state(): with open(state_file_path, 'r') as state_file: data = 
json.load(state_file) if data['version'] == 0:", "StateVersionException(Exception): pass def _get_state(): with open(state_file_path, 'r') as state_file: data = json.load(state_file) if", "0: data.pop('version', None) return data else: raise StateVersionException(f\"No logic to parse state with", "data['version'] == 0: data.pop('version', None) return data else: raise StateVersionException(f\"No logic to parse", "state_file_path = \"resources/state.json\" version = 0 class StateVersionException(Exception): pass def _get_state(): with open(state_file_path,", "None) return data else: raise StateVersionException(f\"No logic to parse state with version: {version}", "#!/usr/bin/python3 import json state_file_path = \"resources/state.json\" version = 0 class StateVersionException(Exception): pass def", "def __init__(self): self.state = _get_state() pass def set_state_value(self, key, value): self.state[key] = value", "state with version: {version} implemented\") def set_state(d): dc = d.copy() dc['version'] = version", "_get_state() pass def set_state_value(self, key, value): self.state[key] = value def get_value(self, key): return", "with version: {version} implemented\") def set_state(d): dc = d.copy() dc['version'] = version with", "as state_file: data = json.load(state_file) if data['version'] == 0: data.pop('version', None) return data", "json.load(state_file) if data['version'] == 0: data.pop('version', None) return data else: raise StateVersionException(f\"No logic", "return data else: raise StateVersionException(f\"No logic to parse state with version: {version} implemented\")", "open(state_file_path, 'w') as state_file: json.dump(dc, state_file, sort_keys=True, indent=4) class State: def __init__(self): self.state", "sort_keys=True, indent=4) class State: def __init__(self): self.state = _get_state() pass def set_state_value(self, key,", "version with open(state_file_path, 'w') as state_file: json.dump(dc, state_file, sort_keys=True, indent=4) class State: def", 
"__init__(self): self.state = _get_state() pass def set_state_value(self, key, value): self.state[key] = value def", "self.state = _get_state() pass def set_state_value(self, key, value): self.state[key] = value def get_value(self,", "state_file: data = json.load(state_file) if data['version'] == 0: data.pop('version', None) return data else:", "import json state_file_path = \"resources/state.json\" version = 0 class StateVersionException(Exception): pass def _get_state():", "State: def __init__(self): self.state = _get_state() pass def set_state_value(self, key, value): self.state[key] =", "if data['version'] == 0: data.pop('version', None) return data else: raise StateVersionException(f\"No logic to", "data = json.load(state_file) if data['version'] == 0: data.pop('version', None) return data else: raise", "with open(state_file_path, 'w') as state_file: json.dump(dc, state_file, sort_keys=True, indent=4) class State: def __init__(self):", "indent=4) class State: def __init__(self): self.state = _get_state() pass def set_state_value(self, key, value):", "def set_state_value(self, key, value): self.state[key] = value def get_value(self, key): return self.state[key] def", "implemented\") def set_state(d): dc = d.copy() dc['version'] = version with open(state_file_path, 'w') as", "else: raise StateVersionException(f\"No logic to parse state with version: {version} implemented\") def set_state(d):", "state_file, sort_keys=True, indent=4) class State: def __init__(self): self.state = _get_state() pass def set_state_value(self,", "pass def set_state_value(self, key, value): self.state[key] = value def get_value(self, key): return self.state[key]", "parse state with version: {version} implemented\") def set_state(d): dc = d.copy() dc['version'] =", "StateVersionException(f\"No logic to parse state with version: {version} implemented\") def set_state(d): dc =", "logic to parse state with version: {version} implemented\") def set_state(d): dc = d.copy()", "def 
set_state(d): dc = d.copy() dc['version'] = version with open(state_file_path, 'w') as state_file:", "d.copy() dc['version'] = version with open(state_file_path, 'w') as state_file: json.dump(dc, state_file, sort_keys=True, indent=4)", "with open(state_file_path, 'r') as state_file: data = json.load(state_file) if data['version'] == 0: data.pop('version',", "as state_file: json.dump(dc, state_file, sort_keys=True, indent=4) class State: def __init__(self): self.state = _get_state()", "= d.copy() dc['version'] = version with open(state_file_path, 'w') as state_file: json.dump(dc, state_file, sort_keys=True,", "self.state[key] = value def get_value(self, key): return self.state[key] def __exit__(self, exc_type, exc_val, exc_tb):" ]
[ "in tests folder # we dont have the receivers file utils.RECEIVERS_FILE_PATH = os.path.split(utils.RECEIVERS_FILE_PATH)[-1]", "we dont have the receivers file utils.RECEIVERS_FILE_PATH = os.path.split(utils.RECEIVERS_FILE_PATH)[-1] self.assertTrue(True) \"\"\" class TestGetCredentials(unittest.TestCase):", "this folder because in tests folder # we dont have the receivers file", "unittest from .context import utils import os class TestGetReceivers(unittest.TestCase): def test_no_file(self): # set", "rain_alert.utils import RECEIVERS_FILE_PATH import unittest from .context import utils import os class TestGetReceivers(unittest.TestCase):", "import RECEIVERS_FILE_PATH import unittest from .context import utils import os class TestGetReceivers(unittest.TestCase): def", "class TestGetReceivers(unittest.TestCase): def test_no_file(self): # set the path of the receivers to this", "# we dont have the receivers file utils.RECEIVERS_FILE_PATH = os.path.split(utils.RECEIVERS_FILE_PATH)[-1] self.assertTrue(True) \"\"\" class", "import unittest from .context import utils import os class TestGetReceivers(unittest.TestCase): def test_no_file(self): #", "of the receivers to this folder because in tests folder # we dont", "dont have the receivers file utils.RECEIVERS_FILE_PATH = os.path.split(utils.RECEIVERS_FILE_PATH)[-1] self.assertTrue(True) \"\"\" class TestGetCredentials(unittest.TestCase): ...", "utils.RECEIVERS_FILE_PATH = os.path.split(utils.RECEIVERS_FILE_PATH)[-1] self.assertTrue(True) \"\"\" class TestGetCredentials(unittest.TestCase): ... \"\"\" if __name__ == '__main__':", "from rain_alert.utils import RECEIVERS_FILE_PATH import unittest from .context import utils import os class", "the receivers file utils.RECEIVERS_FILE_PATH = os.path.split(utils.RECEIVERS_FILE_PATH)[-1] self.assertTrue(True) \"\"\" class TestGetCredentials(unittest.TestCase): ... 
\"\"\" if", "from .context import utils import os class TestGetReceivers(unittest.TestCase): def test_no_file(self): # set the", "folder # we dont have the receivers file utils.RECEIVERS_FILE_PATH = os.path.split(utils.RECEIVERS_FILE_PATH)[-1] self.assertTrue(True) \"\"\"", "# set the path of the receivers to this folder because in tests", "utils import os class TestGetReceivers(unittest.TestCase): def test_no_file(self): # set the path of the", "path of the receivers to this folder because in tests folder # we", "folder because in tests folder # we dont have the receivers file utils.RECEIVERS_FILE_PATH", "receivers to this folder because in tests folder # we dont have the", "tests folder # we dont have the receivers file utils.RECEIVERS_FILE_PATH = os.path.split(utils.RECEIVERS_FILE_PATH)[-1] self.assertTrue(True)", "set the path of the receivers to this folder because in tests folder", "def test_no_file(self): # set the path of the receivers to this folder because", "to this folder because in tests folder # we dont have the receivers", "the path of the receivers to this folder because in tests folder #", "file utils.RECEIVERS_FILE_PATH = os.path.split(utils.RECEIVERS_FILE_PATH)[-1] self.assertTrue(True) \"\"\" class TestGetCredentials(unittest.TestCase): ... 
\"\"\" if __name__ ==", "test_no_file(self): # set the path of the receivers to this folder because in", "the receivers to this folder because in tests folder # we dont have", "TestGetReceivers(unittest.TestCase): def test_no_file(self): # set the path of the receivers to this folder", ".context import utils import os class TestGetReceivers(unittest.TestCase): def test_no_file(self): # set the path", "because in tests folder # we dont have the receivers file utils.RECEIVERS_FILE_PATH =", "os class TestGetReceivers(unittest.TestCase): def test_no_file(self): # set the path of the receivers to", "RECEIVERS_FILE_PATH import unittest from .context import utils import os class TestGetReceivers(unittest.TestCase): def test_no_file(self):", "import os class TestGetReceivers(unittest.TestCase): def test_no_file(self): # set the path of the receivers", "have the receivers file utils.RECEIVERS_FILE_PATH = os.path.split(utils.RECEIVERS_FILE_PATH)[-1] self.assertTrue(True) \"\"\" class TestGetCredentials(unittest.TestCase): ... \"\"\"", "= os.path.split(utils.RECEIVERS_FILE_PATH)[-1] self.assertTrue(True) \"\"\" class TestGetCredentials(unittest.TestCase): ... \"\"\" if __name__ == '__main__': unittest.main()", "import utils import os class TestGetReceivers(unittest.TestCase): def test_no_file(self): # set the path of", "receivers file utils.RECEIVERS_FILE_PATH = os.path.split(utils.RECEIVERS_FILE_PATH)[-1] self.assertTrue(True) \"\"\" class TestGetCredentials(unittest.TestCase): ... \"\"\" if __name__" ]
[ "79, 70, 72, 72, 74, 72 ,72, 70, 70, 73, 71, 73, 72,", "moyOrange) stdNoir = np.round(np.std(noir), 2) stdBleu = np.round(np.std(bleu), 2) stdOrange = np.round(np.std(orange), 2)", "74, 72, 72, 72, 67, 69, 76, 76, 77, 77, 74, 72, 79,", "[71, 69, 69, 70, 73, 70, 75, 75, 74, 72, 72, 72, 67,", "74, 72, 79, 79, 71, 71, 75, 74, 74, 73, 72, 73, 74]", "import matplotlib.pyplot as plt noir = [71, 69, 69, 70, 73, 70, 75,", "moyOrange = np.round(np.mean(orange), 2) print(moyNoir, moyBleu, moyOrange) stdNoir = np.round(np.std(noir), 2) stdBleu =", "2) print(moyNoir, moyBleu, moyOrange) stdNoir = np.round(np.std(noir), 2) stdBleu = np.round(np.std(bleu), 2) stdOrange", "76, 73, 73, 84, 68] \"\"\"moyNoir = np.round(np.mean(noir), 2) moyBleu = np.round(np.mean(bleu), 2)", "plt noir = [71, 69, 69, 70, 73, 70, 75, 75, 74, 72,", "73, 72, 71, 71, 70] orange = [73, 72, 71, 75, 72, 75,", "72, 71, 75, 72, 75, 70, 70, 70, 73, 74, 72, 73, 73,", "2) moyOrange = np.round(np.mean(orange), 2) print(moyNoir, moyBleu, moyOrange) stdNoir = np.round(np.std(noir), 2) stdBleu", "= np.round(np.std(noir), 2) stdBleu = np.round(np.std(bleu), 2) stdOrange = np.round(np.std(orange), 2) print(stdNoir, stdBleu,", "= [73, 72, 71, 75, 72, 75, 70, 70, 70, 73, 74, 72,", "67, 69, 76, 76, 77, 77, 74, 72, 79, 79, 71, 71, 75,", "75, 72, 75, 70, 70, 70, 73, 74, 72, 73, 73, 72, 72,", "72, 72, 73, 72, 71, 71, 70] orange = [73, 72, 71, 75,", "79, 79, 70, 72, 72, 74, 72 ,72, 70, 70, 73, 71, 73,", "74, 73, 82, 79, 79, 70, 72, 72, 74, 72 ,72, 70, 70,", "69, 76, 76, 77, 77, 74, 72, 79, 79, 71, 71, 75, 74,", "70] orange = [73, 72, 71, 75, 72, 75, 70, 70, 70, 73,", "76, 76, 73, 73, 84, 68] \"\"\"moyNoir = np.round(np.mean(noir), 2) moyBleu = np.round(np.mean(bleu),", "= np.round(np.mean(orange), 2) print(moyNoir, moyBleu, moyOrange) stdNoir = np.round(np.std(noir), 2) stdBleu = np.round(np.std(bleu),", "69, 74, 71, 71, 76, 76, 73, 73, 84, 68] \"\"\"moyNoir = np.round(np.mean(noir),", "74, 74, 73, 82, 79, 79, 70, 72, 72, 
74, 72 ,72, 70,", "69, 69, 74, 71, 71, 76, 76, 73, 73, 84, 68] \"\"\"moyNoir =", "72, 72, 71, 71, 72, 69, 69, 74, 71, 71, 76, 76, 73,", "71, 71, 76, 76, 73, 73, 84, 68] \"\"\"moyNoir = np.round(np.mean(noir), 2) moyBleu", "74, 73, 72, 73, 74] bleu = [73, 72, 73, 71, 71, 76,", "= np.round(np.mean(bleu), 2) moyOrange = np.round(np.mean(orange), 2) print(moyNoir, moyBleu, moyOrange) stdNoir = np.round(np.std(noir),", "73, 71, 73, 72, 72, 73, 72, 71, 71, 70] orange = [73,", "70, 70, 70, 73, 74, 72, 73, 73, 72, 72, 72, 71, 71,", "76, 77, 74, 74, 73, 82, 79, 79, 70, 72, 72, 74, 72", "70, 70, 73, 74, 72, 73, 73, 72, 72, 72, 71, 71, 72,", "72, 72, 67, 69, 76, 76, 77, 77, 74, 72, 79, 79, 71,", "72, 69, 69, 74, 71, 71, 76, 76, 73, 73, 84, 68] \"\"\"moyNoir", "= [73, 72, 73, 71, 71, 76, 77, 74, 74, 73, 82, 79,", "73, 74] bleu = [73, 72, 73, 71, 71, 76, 77, 74, 74,", "74] bleu = [73, 72, 73, 71, 71, 76, 77, 74, 74, 73,", "noir = [71, 69, 69, 70, 73, 70, 75, 75, 74, 72, 72,", "72, 72, 72, 67, 69, 76, 76, 77, 77, 74, 72, 79, 79,", "73, 72, 72, 73, 72, 71, 71, 70] orange = [73, 72, 71,", "= [71, 69, 69, 70, 73, 70, 75, 75, 74, 72, 72, 72,", "75, 75, 74, 72, 72, 72, 67, 69, 76, 76, 77, 77, 74,", "np.round(np.mean(orange), 2) print(moyNoir, moyBleu, moyOrange) stdNoir = np.round(np.std(noir), 2) stdBleu = np.round(np.std(bleu), 2)", "71, 71, 70] orange = [73, 72, 71, 75, 72, 75, 70, 70,", "72, 72, 72, 71, 71, 72, 69, 69, 74, 71, 71, 76, 76,", "71, 72, 69, 69, 74, 71, 71, 76, 76, 73, 73, 84, 68]", "69, 69, 70, 73, 70, 75, 75, 74, 72, 72, 72, 67, 69,", "as plt noir = [71, 69, 69, 70, 73, 70, 75, 75, 74,", "matplotlib.pyplot as plt noir = [71, 69, 69, 70, 73, 70, 75, 75,", ",72, 70, 70, 73, 71, 73, 72, 72, 73, 72, 71, 71, 70]", "74, 74, 73, 72, 73, 74] bleu = [73, 72, 73, 71, 71,", "[73, 72, 71, 75, 72, 75, 70, 70, 70, 73, 74, 72, 73,", "75, 70, 70, 70, 73, 74, 72, 73, 73, 72, 72, 72, 71,", "72, 74, 72 ,72, 70, 70, 73, 71, 73, 72, 72, 73, 72,", "75, 74, 72, 72, 72, 67, 69, 76, 76, 77, 
77, 74, 72,", "72, 72, 74, 72 ,72, 70, 70, 73, 71, 73, 72, 72, 73,", "2) moyBleu = np.round(np.mean(bleu), 2) moyOrange = np.round(np.mean(orange), 2) print(moyNoir, moyBleu, moyOrange) stdNoir", "73, 72, 73, 74] bleu = [73, 72, 73, 71, 71, 76, 77,", "73, 74, 72, 73, 73, 72, 72, 72, 71, 71, 72, 69, 69,", "75, 74, 74, 73, 72, 73, 74] bleu = [73, 72, 73, 71,", "72, 73, 71, 71, 76, 77, 74, 74, 73, 82, 79, 79, 70,", "np.round(np.mean(bleu), 2) moyOrange = np.round(np.mean(orange), 2) print(moyNoir, moyBleu, moyOrange) stdNoir = np.round(np.std(noir), 2)", "72, 73, 72, 71, 71, 70] orange = [73, 72, 71, 75, 72,", "72, 75, 70, 70, 70, 73, 74, 72, 73, 73, 72, 72, 72,", "np.round(np.std(noir), 2) stdBleu = np.round(np.std(bleu), 2) stdOrange = np.round(np.std(orange), 2) print(stdNoir, stdBleu, stdOrange)\"\"\"", "76, 77, 77, 74, 72, 79, 79, 71, 71, 75, 74, 74, 73,", "71, 76, 77, 74, 74, 73, 82, 79, 79, 70, 72, 72, 74,", "np.round(np.mean(noir), 2) moyBleu = np.round(np.mean(bleu), 2) moyOrange = np.round(np.mean(orange), 2) print(moyNoir, moyBleu, moyOrange)", "71, 71, 72, 69, 69, 74, 71, 71, 76, 76, 73, 73, 84,", "68] \"\"\"moyNoir = np.round(np.mean(noir), 2) moyBleu = np.round(np.mean(bleu), 2) moyOrange = np.round(np.mean(orange), 2)", "72, 73, 74] bleu = [73, 72, 73, 71, 71, 76, 77, 74,", "73, 73, 84, 68] \"\"\"moyNoir = np.round(np.mean(noir), 2) moyBleu = np.round(np.mean(bleu), 2) moyOrange", "84, 68] \"\"\"moyNoir = np.round(np.mean(noir), 2) moyBleu = np.round(np.mean(bleu), 2) moyOrange = np.round(np.mean(orange),", "72, 67, 69, 76, 76, 77, 77, 74, 72, 79, 79, 71, 71,", "71, 76, 76, 73, 73, 84, 68] \"\"\"moyNoir = np.round(np.mean(noir), 2) moyBleu =", "moyBleu, moyOrange) stdNoir = np.round(np.std(noir), 2) stdBleu = np.round(np.std(bleu), 2) stdOrange = np.round(np.std(orange),", "71, 71, 75, 74, 74, 73, 72, 73, 74] bleu = [73, 72,", "72, 73, 73, 72, 72, 72, 71, 71, 72, 69, 69, 74, 71,", "73, 72, 72, 72, 71, 71, 72, 69, 69, 74, 71, 71, 76,", "np import 
matplotlib.pyplot as plt noir = [71, 69, 69, 70, 73, 70,", "print(moyNoir, moyBleu, moyOrange) stdNoir = np.round(np.std(noir), 2) stdBleu = np.round(np.std(bleu), 2) stdOrange =", "stdNoir = np.round(np.std(noir), 2) stdBleu = np.round(np.std(bleu), 2) stdOrange = np.round(np.std(orange), 2) print(stdNoir,", "79, 71, 71, 75, 74, 74, 73, 72, 73, 74] bleu = [73,", "= np.round(np.mean(noir), 2) moyBleu = np.round(np.mean(bleu), 2) moyOrange = np.round(np.mean(orange), 2) print(moyNoir, moyBleu,", "73, 70, 75, 75, 74, 72, 72, 72, 67, 69, 76, 76, 77,", "numpy as np import matplotlib.pyplot as plt noir = [71, 69, 69, 70,", "71, 70] orange = [73, 72, 71, 75, 72, 75, 70, 70, 70,", "74, 72 ,72, 70, 70, 73, 71, 73, 72, 72, 73, 72, 71,", "74, 71, 71, 76, 76, 73, 73, 84, 68] \"\"\"moyNoir = np.round(np.mean(noir), 2)", "import numpy as np import matplotlib.pyplot as plt noir = [71, 69, 69,", "77, 74, 72, 79, 79, 71, 71, 75, 74, 74, 73, 72, 73,", "orange = [73, 72, 71, 75, 72, 75, 70, 70, 70, 73, 74,", "71, 75, 74, 74, 73, 72, 73, 74] bleu = [73, 72, 73,", "moyBleu = np.round(np.mean(bleu), 2) moyOrange = np.round(np.mean(orange), 2) print(moyNoir, moyBleu, moyOrange) stdNoir =", "70, 73, 71, 73, 72, 72, 73, 72, 71, 71, 70] orange =", "70, 72, 72, 74, 72 ,72, 70, 70, 73, 71, 73, 72, 72,", "72, 79, 79, 71, 71, 75, 74, 74, 73, 72, 73, 74] bleu", "72, 71, 71, 70] orange = [73, 72, 71, 75, 72, 75, 70,", "72 ,72, 70, 70, 73, 71, 73, 72, 72, 73, 72, 71, 71,", "\"\"\"moyNoir = np.round(np.mean(noir), 2) moyBleu = np.round(np.mean(bleu), 2) moyOrange = np.round(np.mean(orange), 2) print(moyNoir,", "79, 79, 71, 71, 75, 74, 74, 73, 72, 73, 74] bleu =", "77, 74, 74, 73, 82, 79, 79, 70, 72, 72, 74, 72 ,72,", "71, 75, 72, 75, 70, 70, 70, 73, 74, 72, 73, 73, 72,", "bleu = [73, 72, 73, 71, 71, 76, 77, 74, 74, 73, 82,", "71, 73, 72, 72, 73, 72, 71, 71, 70] orange = [73, 72,", "71, 71, 76, 77, 74, 74, 73, 82, 79, 79, 70, 72, 72,", "73, 84, 68] \"\"\"moyNoir = np.round(np.mean(noir), 2) moyBleu 
= np.round(np.mean(bleu), 2) moyOrange =", "70, 70, 73, 71, 73, 72, 72, 73, 72, 71, 71, 70] orange", "[73, 72, 73, 71, 71, 76, 77, 74, 74, 73, 82, 79, 79,", "77, 77, 74, 72, 79, 79, 71, 71, 75, 74, 74, 73, 72,", "74, 72, 73, 73, 72, 72, 72, 71, 71, 72, 69, 69, 74,", "as np import matplotlib.pyplot as plt noir = [71, 69, 69, 70, 73,", "73, 73, 72, 72, 72, 71, 71, 72, 69, 69, 74, 71, 71,", "69, 70, 73, 70, 75, 75, 74, 72, 72, 72, 67, 69, 76,", "70, 75, 75, 74, 72, 72, 72, 67, 69, 76, 76, 77, 77,", "72, 71, 71, 72, 69, 69, 74, 71, 71, 76, 76, 73, 73,", "70, 73, 74, 72, 73, 73, 72, 72, 72, 71, 71, 72, 69,", "76, 76, 77, 77, 74, 72, 79, 79, 71, 71, 75, 74, 74,", "73, 82, 79, 79, 70, 72, 72, 74, 72 ,72, 70, 70, 73,", "73, 71, 71, 76, 77, 74, 74, 73, 82, 79, 79, 70, 72,", "70, 73, 70, 75, 75, 74, 72, 72, 72, 67, 69, 76, 76,", "82, 79, 79, 70, 72, 72, 74, 72 ,72, 70, 70, 73, 71," ]
[ "Django 3.2.7 on 2021-10-18 12:21 from django.conf import settings from django.db import migrations,", "2021-10-18 12:21 from django.conf import settings from django.db import migrations, models import django.db.models.deletion", "verbose_name='Endereço'), ), migrations.AlterField( model_name='perfil', name='numero', field=models.CharField(max_length=5, verbose_name='Número'), ), migrations.AlterField( model_name='perfil', name='usuario', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL,", "from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('perfil', '0001_initial'), ] operations =", "), migrations.AlterField( model_name='perfil', name='numero', field=models.CharField(max_length=5, verbose_name='Número'), ), migrations.AlterField( model_name='perfil', name='usuario', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Usuário'),", "dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('perfil', '0001_initial'), ] operations = [ migrations.AlterField( model_name='perfil', name='endereco',", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('perfil',", "Generated by Django 3.2.7 on 2021-10-18 12:21 from django.conf import settings from django.db", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('perfil', '0001_initial'),", "by Django 3.2.7 on 2021-10-18 12:21 from django.conf import settings from django.db import", "models import 
django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('perfil', '0001_initial'), ] operations", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('perfil', '0001_initial'), ] operations = [ migrations.AlterField( model_name='perfil', name='endereco', field=models.CharField(max_length=50, verbose_name='Endereço'), ),", "model_name='perfil', name='numero', field=models.CharField(max_length=5, verbose_name='Número'), ), migrations.AlterField( model_name='perfil', name='usuario', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Usuário'), ), ]", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('perfil', '0001_initial'), ] operations = [", "# Generated by Django 3.2.7 on 2021-10-18 12:21 from django.conf import settings from", "12:21 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class", "3.2.7 on 2021-10-18 12:21 from django.conf import settings from django.db import migrations, models", "[ migrations.AlterField( model_name='perfil', name='endereco', field=models.CharField(max_length=50, verbose_name='Endereço'), ), migrations.AlterField( model_name='perfil', name='numero', field=models.CharField(max_length=5, verbose_name='Número'), ),", "[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('perfil', '0001_initial'), ] operations = [ migrations.AlterField( model_name='perfil', name='endereco', field=models.CharField(max_length=50, verbose_name='Endereço'),", "('perfil', '0001_initial'), ] operations = [ migrations.AlterField( model_name='perfil', name='endereco', field=models.CharField(max_length=50, verbose_name='Endereço'), ), migrations.AlterField(", "class Migration(migrations.Migration): dependencies = [ 
migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('perfil', '0001_initial'), ] operations = [ migrations.AlterField(", "name='endereco', field=models.CharField(max_length=50, verbose_name='Endereço'), ), migrations.AlterField( model_name='perfil', name='numero', field=models.CharField(max_length=5, verbose_name='Número'), ), migrations.AlterField( model_name='perfil', name='usuario',", "= [ migrations.AlterField( model_name='perfil', name='endereco', field=models.CharField(max_length=50, verbose_name='Endereço'), ), migrations.AlterField( model_name='perfil', name='numero', field=models.CharField(max_length=5, verbose_name='Número'),", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "migrations.AlterField( model_name='perfil', name='endereco', field=models.CharField(max_length=50, verbose_name='Endereço'), ), migrations.AlterField( model_name='perfil', name='numero', field=models.CharField(max_length=5, verbose_name='Número'), ), migrations.AlterField(", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('perfil', '0001_initial'), ]", "migrations.AlterField( model_name='perfil', name='numero', field=models.CharField(max_length=5, verbose_name='Número'), ), migrations.AlterField( model_name='perfil', name='usuario', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Usuário'), ),", "django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =", "operations = [ migrations.AlterField( 
model_name='perfil', name='endereco', field=models.CharField(max_length=50, verbose_name='Endereço'), ), migrations.AlterField( model_name='perfil', name='numero', field=models.CharField(max_length=5,", "model_name='perfil', name='endereco', field=models.CharField(max_length=50, verbose_name='Endereço'), ), migrations.AlterField( model_name='perfil', name='numero', field=models.CharField(max_length=5, verbose_name='Número'), ), migrations.AlterField( model_name='perfil',", "field=models.CharField(max_length=50, verbose_name='Endereço'), ), migrations.AlterField( model_name='perfil', name='numero', field=models.CharField(max_length=5, verbose_name='Número'), ), migrations.AlterField( model_name='perfil', name='usuario', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE,", "'0001_initial'), ] operations = [ migrations.AlterField( model_name='perfil', name='endereco', field=models.CharField(max_length=50, verbose_name='Endereço'), ), migrations.AlterField( model_name='perfil',", "settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('perfil', '0001_initial'), ] operations = [ migrations.AlterField( model_name='perfil',", "] operations = [ migrations.AlterField( model_name='perfil', name='endereco', field=models.CharField(max_length=50, verbose_name='Endereço'), ), migrations.AlterField( model_name='perfil', name='numero',", "on 2021-10-18 12:21 from django.conf import settings from django.db import migrations, models import", "= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('perfil', '0001_initial'), ] operations = [ migrations.AlterField( model_name='perfil', name='endereco', field=models.CharField(max_length=50," ]
[ "= client.open('HypeMan_LSO_Grades').sheet1 list_of_hashes = sheet.get_all_records() with open('data.txt', 'w') as outfile: json.dump(list_of_hashes, outfile) except:", "cells to the end for f in range(col_idx,options['maxCols']+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_linewidth(0.5)", "= options['maxRows'] fig = plt.figure(figsize=(6, 3), dpi=250) ax = fig.add_subplot(1,1,1) frame1 = plt.gca()", "maxDate = data[0]['ServerDate'] # except: # minDate ='' # maxDate = '' textcolor", "= plt.figure(figsize=(6, 3), dpi=250) ax = fig.add_subplot(1,1,1) frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb", "gradeCell['icon']+= '5' except: pt=0 gradeCell['bg'] = colorFromPoints(pt) gradeCell['score'] = pt # if not", "= data2 if squadron != '': currentMonthSQ = datetime.now().month print('skipping landings not in", "of data array: ' , str(len(data))) count = 0 if squadron != '':", "ruleset = 'best' #print('Length of argv: ' , len(sys.argv)); if len(sys.argv) >= 2:", "== 5.5: color = bluegraycolor else: color = blankcell return color def calculateGradeTailhooker(curList):", "= '' textcolor = '#FFFFF0' edgecolor = '#708090' cell = tb.add_cell(0,0,8*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor)", "# set the default grade #grade0={}; grade0['color']='white'; grade0['score']=0.0; grade0['symbol']='x'; grade0['grade']='--' # if squadron", "if len(i) > maxLength: maxLength = len(i) if maxLength < 17: maxLength =", "squadron is empty then lets trim the landings not in the current month", "minRows = options['maxRows'] #for p_idx in range(0,len(pilots)): for p_idx in range(0,minRows): row_idx =", "= tb.add_cell(0,1,2*width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) 
cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonth = datetime.now().month titlestr =", "List:', str(sys.argv)) #if len(sys.argv)== 2: # print('Argument Number 2: ', str(sys.argv[1])) def updateDatabase(path):", "#colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718'] # redcolor = '#a00000' # browncolor = '#835C3B' #", "= '○' #case2 = '○' #case2 = '∘' #unicorn='✈️' blankcell='#1A392A' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718']", "if '3' in ij['icon']: text = case3 elif '2' in ij['icon']: text =", "remaining cells to the end for f in range(col_idx,options['maxCols']+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none'", "text = '' if count < len(titlestr): text = titlestr[count] count = count", "color = blankcell text = '' if '3' in ij['icon']: text = case3", "if imonth == currentMonthSQ: data2.append(i) data = data2 for i in reversed(data): name", "boardRow.append(grade) # curList = []; # curList.append(i) #print(boardRow) return boardRow def CalculateAverageScore(pilotRow): score", "print('size of data array: ' , str(len(data))) count = 0 if squadron !=", "# pilotDict[name]=pilotRow options = {} if squadron == '': plotDefaultBoard(pilotRows, options) else: options['airframe']", "= i['ServerDate'].split('/') imonth = int(idate[1]) if imonth == currentMonthSQ: data2.append(i) data = data2", "name = pilots[p_idx] rd = pilotRows[name] #avg = statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText", "gradeCell['icon'] += '3' if i['case'] == 2 and not '2' in gradeCell['icon']: gradeCell['icon']", "# grade = calculateGrade(curList, grade0) # boardRow.append(grade) # 
curList = []; # curList.append(i)", "cell.set_linewidth(0.5) # name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(size=\"6.0\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) col_idx", "matplotlib.pyplot as plt import matplotlib.colors as mcolors from matplotlib.table import Table from matplotlib.font_manager", "for f in range(col_idx,maxLength+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_edgecolor(edgecolor) #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off')", "data: name = i['pilot'] #print('Name: ' , name) name = name.replace('-', '') name", "if minRows < 12: minRows = 12 #for p_idx in range(0,len(pilots)): for p_idx", "# add the remaining cells to the end for f in range(col_idx,options['maxCols']+2): cell", "= str(sys.argv[3]); print('Squadron: ', squadron) print('Ruleset: ', ruleset) lsoData = 'data.txt' updateDatabase(lsoData) with", "airframe: if i['airframe'] in airframe: data2.append(i) # print('Deleting airframe: ', i['airframe'], ' was", "cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) # #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0)", "pt # if not gradeCell['score']: # print('what') return gradeCell def colorFromPoints(g): bluegraycolor =", "gradeCell def colorFromPoints(g): bluegraycolor = '#708286' glossgraycolor = '#5F615E' redcolor = '#ED1B24' browncolor", "== 0: color=blackcolor elif g == 1: color=redcolor elif g == 2.0: color=browncolor", "1: print('Updating from Google.') updateFromGoogle() else: print('Less than one hour since last refresh,", "'#d17a00' yellowcolor = '#b6c700' greencolor = '#0bab35' 
bluecolor = '#01A2EA' blankcell='#FFFFFF' blackcolor =", "time from datetime import datetime import matplotlib.pyplot as plt import matplotlib.colors as mcolors", "= name.replace(squadron,'') # if the squadron was empty just keep the original data", "len(pilots)+1 width, height = 100 / n_cols, 100.0 / n_rows shithot ='🎖️' anchor='⚓'", "ruleset): if ruleset == 'best': return calculateGradeCivilian(curList) if ruleset == 'first': return calculateGradeTailhooker(curList)", "cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') # titlestr = 'JOW Greenie Board ' + minDate + '", "#grade0={}; grade0['color']='white'; grade0['score']=0.0; grade0['symbol']='x'; grade0['grade']='--' # if squadron is empty then lets trim", "= colorFromPoints(pt) gradeCell['score'] = pt # if not gradeCell['score']: # print('what') return gradeCell", "os.path.isfile(path) or time.time() - getModificationTimeSeconds(path) > 1: print('Updating from Google.') updateFromGoogle() else: print('Less", "data array: ' , str(len(data))) count = 0 if airframe != '': data2", "= '#5F615E' browncolor = '#835C3B' orangecolor = '#d17a00' yellowcolor = '#b6c700' greencolor =", "Board # set the default grade #grade0={}; grade0['color']='white'; grade0['score']=0.0; grade0['symbol']='x'; grade0['grade']='--' # if", "'⊙' case2 = '○' #case2 = '○' #case2 = '∘' #unicorn='✈️' blankcell='#1A392A' #colors=['red','orange','orange','yellow','lightgreen']", "if the squadron was empty just keep the original data data = data2", "- getModificationTimeSeconds(path) > 1: print('Updating from Google.') updateFromGoogle() else: print('Less than one hour", "Table(ax, bbox=[0, 0, 1, 1]) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows = len(pilots)+1 width,", "' , str(len(data))) count = 0 if airframe != '': data2 = []", "score: ' , pilotRow) pilotRows[name] = (pilotRow) # pilotDict[name]=pilotRow options = {} if", "ruleset) #print(name,' score: ' , pilotRow) pilotRows[name] = (pilotRow) # pilotDict[name]=pilotRow 
options =", "'2' in g['icon']: text = case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') # cell.auto_set_font_size()", "# browncolor = '#835C3B' # orangecolor = '#d17a00' # yellowcolor = '#b6c700' #", "datetime import matplotlib.pyplot as plt import matplotlib.colors as mcolors from matplotlib.table import Table", "elif str(sys.argv[1]) == 'harrier': airframe = 'AV8BNA' elif str(sys.argv[1]) == 'goshawk': airframe =", "= greencolor elif g == 5.5: color = bluegraycolor else: color = blankcell", "grade0['score']=0.0; grade0['symbol']='x'; grade0['grade']='--' # if squadron is empty then lets trim the landings", "keep only a specified airframe data2 = data print('... size of data array:", "'○' #case2 = '○' #case2 = '∘' #unicorn='✈️' blankcell='#FFFFFF' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718']", "and '5' in g['icon']: text = goldstar elif '3' in g['icon']: text =", "j['ServerDate'] == i: curList.append(j) ithPilotGrade = calculateGrade(curList,ruleset) boardRow.append(ithPilotGrade) # if not haveDate: #", "yellowcolor = '#b6c700' # greencolor = '#0bab35' # bluecolor = '#01A2EA' #try: #", "= pilotRows[name] #avg = statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText = round(avg,1) if name.lower()", "/ n_rows shithot ='🎖️' anchor='⚓' goldstar = '⭐' goldstar = '★' case3 =", "maxLength < options['maxRows']: maxLength = options['maxRows'] fig = plt.figure(figsize=(6, 3), dpi=250) ax =", "not i['finalscore']: gradeCell['score'] = i['points'] else: gradeCell['score'] = i['finalscore'] pts.append(i['points']) gradeCell['bg'] = colorFromPoints(min(pts))", "{} pilotDict = {} # get the rows as they will appear in", "from Google Sheets.') def getModificationTimeSeconds(path): ct = time.time() try: 
modification_time = os.path.getmtime(path) #print(\"Last", "maxLength = len(i) if maxLength < options['maxRows']: maxLength = options['maxRows'] fig = plt.figure(figsize=(6,", "empty') if not i['finalscore']: gradeCell['score'] = i['points'] else: gradeCell['score'] = i['finalscore'] pts.append(i['points']) gradeCell['bg']", "= '#d17a00' yellowcolor = '#b6c700' greencolor = '#0bab35' bluecolor = '#01A2EA' blankcell='#FFFFFF' blackcolor", "data = data2 for i in reversed(data): name = i['pilot'] if name not", "= []; # curList.append(i) #print(boardRow) return boardRow def CalculateAverageScore(pilotRow): score = 0.0 for", "import matplotlib.pyplot as plt import matplotlib.colors as mcolors from matplotlib.table import Table from", "= tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') cell.set_linewidth(0.5) # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"10\")) cell.set_edgecolor(edgecolor) col_idx = col_idx +", "== i['ServerDate']: # curList.append(i) # # else: # curDate = i['ServerDate'] # grade", "= []; uniqueDates = [] for i in reversed(data): #grade = grade0 if", "== 'first': return calculateGradeTailhooker(curList) def calculatePilotRow(data, name, ruleset): #print(name) boardRow = []; uniqueDates", "rd = pilotRows[name] #avg = statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText = round(avg,1) if", "5: color = greencolor elif g == 5.5: color = bluegraycolor else: color", "4.5: color = greencolor elif g == 5: color = greencolor elif g", "return ct-4000 return modification_time def calculateGradeCivilian(curList): gradeCell = {} gradeCell['score'] = -1 gradeCell['icon']", "= tb.add_cell(row_idx,1,2*width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.4\")) cell.set_edgecolor(edgecolor) col_idx = 2 for g 
in rd: color", "avg = CalculateAverageScore(rd) scoreText = round(avg,1) cell = tb.add_cell(row_idx,0,10*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7\")) cell.set_edgecolor(edgecolor)", "import numpy as np import statistics import sys, getopt from oauth2client.service_account import ServiceAccountCredentials", "'') name = name.replace(']', '') name = name.replace('|', '') name = name.replace('\\\\', '')", "cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) # name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(size=\"6.0\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5)", "maxDate minRows = len(pilots) if minRows < 12: minRows = 12 #for p_idx", "= '#708090' cell = tb.add_cell(0,0,10*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) #cell.set_fontsize(24) cell =", "#print(i) curList = []; for j in data: if name == j['pilot'] and", "1 pts.append(1) else: gradeCell['score'] = statistics.mean(pts) gradeCell['bg'] = colorFromPoints(min(pts)) return gradeCell def calculateGrade(curList,", "'': data2 = [] print('Keeping only grades for airframe: ', airframe) for i", "in g['icon']: text = case3 elif '5' in g['icon']: text = anchor elif", "= case3 elif '2' in ij['icon']: text = case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none'", "= calculateGrade(curList, grade0) # boardRow.append(grade) # curList = []; # curList.append(i) #print(boardRow) return", "len(pilots) if minRows < options['maxRows']: minRows = 
options['maxRows'] #for p_idx in range(0,len(pilots)): for", "ij['icon']: text = case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') cell.set_linewidth(0.5) # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"10\"))", "g['icon']: text = goldstar elif '3' in g['icon']: text = case3 elif '5'", "count = count + 1 cell = tb.add_cell(0, col_idx, width, height, text=text, loc='center',", "if squadron == '': currentMonth = datetime.now().month print('skipping landings not in current month')", "= ServiceAccountCredentials.from_json_keyfile_name('HypeManLSO-358d4493fc1d.json', scope) client = gspread.authorize(creds) sheet = client.open('HypeMan_LSO_Grades').sheet1 list_of_hashes = sheet.get_all_records() with", "reversed(data): #grade = grade0 if name == i['pilot']: if i['ServerDate'] not in uniqueDates:", "maxLength: maxLength = len(i) if maxLength < 17: maxLength = 17 fig =", "len(i) > maxLength: maxLength = len(i) if maxLength < 17: maxLength = 17", "'best' #print('Length of argv: ' , len(sys.argv)); if len(sys.argv) >= 2: if str(sys.argv[1])", "+ maxDate minRows = len(pilots) if minRows < options['maxRows']: minRows = options['maxRows'] #for", "options['maxRows']: maxLength = options['maxRows'] fig = plt.figure(figsize=(6, 3), dpi=250) ax = fig.add_subplot(1,1,1) frame1", "for i in curList: if i['case'] == 3 and not '3' in gradeCell['icon']:", "cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"10\")) cell.set_edgecolor(edgecolor) col_idx = col_idx + 1 color = blankcell text='' # add", "try: tmp = float(i['points']) if tmp > pt: pt = tmp if tmp", "def calculatePilotRow(data, name, ruleset): #print(name) boardRow = []; uniqueDates = [] for i", "i in pilotRows: if len(i) > maxLength: maxLength = len(i) if maxLength <", "f in range(col_idx,options['maxCols']+2): cell = 
tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) # #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off()", "else: gradeCell['score'] = statistics.mean(pts) gradeCell['bg'] = colorFromPoints(min(pts)) return gradeCell def calculateGrade(curList, ruleset): if", "text = '' if '5.5' in g['icon']: text = shithot elif '3' in", "#plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) def plotDefaultBoard(pilotRows, options): maxLength = 0 for i in pilotRows:", "< options['maxRows']: minRows = options['maxRows'] #for p_idx in range(0,len(pilots)): for p_idx in range(0,minRows):", "p_idx in range(0,len(pilots)): for p_idx in range(0,minRows): row_idx = p_idx+1 rd = []", "= '○' #case2 = '○' #case2 = '∘' #unicorn='✈️' blankcell='#FFFFFF' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718']", "col_idx = 2 for g in rd: color = g['bg'] text = ''", "if len(pts) == 0: gradeCell['score'] = 1 pts.append(1) else: gradeCell['score'] = statistics.mean(pts) gradeCell['bg']", "tb.add_cell(0,0,8*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) cell = tb.add_cell(0,1,2*width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8))", "blankcell='#1A392A' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718', '#708286','#5F615E'] redcolor = '#a00000' 
bluegraycolor = '#708286' glossgraycolor", "matplotlib.colors as mcolors from matplotlib.table import Table from matplotlib.font_manager import FontProperties import numpy", "= pt # if not gradeCell['score']: # print('what') return gradeCell def colorFromPoints(g): bluegraycolor", "= airframe options['squadron'] = squadron options['ruleset'] = ruleset options['maxRows']=10 options['maxCols']=17 plotSquadron(pilotRows, options) print('done')", "textcolor = '#FFFFF0' edgecolor = '#708090' cell = tb.add_cell(0,0,8*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor)", "str(sys.argv[3]); print('Squadron: ', squadron) print('Ruleset: ', ruleset) lsoData = 'data.txt' updateDatabase(lsoData) with open('data.txt')", "tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_edgecolor(edgecolor) #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0)", "print('Less than one hour since last refresh, skipping pull from google.') def updateFromGoogle():", "1 gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF' pts = [] count = 0", "curList = []; # curList.append(i) #print(boardRow) return boardRow def CalculateAverageScore(pilotRow): score = 0.0", "= ' JOINT OPS WING' #+ str(currentMonth) + '/' + str(datetime.now().year) print(titlestr) count", "' to ' + maxDate minRows = len(pilots) if minRows < 12: minRows", "in pilotRows: if len(i) > maxLength: maxLength = len(i) if maxLength < 17:", "in rd: color = g['bg'] text = '' if '5.5' in g['icon']: text", "'#835C3B' # orangecolor = '#d17a00' # yellowcolor = '#b6c700' # greencolor = '#0bab35'", "'2' in ij['icon']: text = case2 cell = 
tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') cell.set_linewidth(0.5) #", "cell.set_edgecolor(edgecolor) # name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,2*width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.4\")) cell.set_edgecolor(edgecolor) col_idx =", "finalscore = score/len(pilotRow) #print(finalscore) return finalscore def plotSquadron(pilotRows, options): #print('PlotSquadron') maxLength = 0", "[] print('Skipping WOFDs') for i in data: if not'WOFD' in i['grade']: data2.append(i) data", "= 0 for i in pilotRows: if len(i) > maxLength: maxLength = len(i)", "for ij in rd: color = ij['bg'] if not color: color = blankcell", "curDate == i['ServerDate']: # curList.append(i) # # else: # curDate = i['ServerDate'] #", "height = 100 / n_cols, 100.0 / n_rows #height = height/10 shithot ='🎖️'", "# cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"10\")) cell.set_edgecolor(edgecolor) col_idx = col_idx + 1 color = blankcell text=''", "pilots[p_idx] rd = pilotRows[name] #avg = statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText = round(avg,1)", "= 100 / n_cols, 100.0 / n_rows #height = height/10 shithot ='🎖️' anchor='⚓'", "= '' textcolor = '#000000' edgecolor = '#708090' cell = tb.add_cell(0,0,10*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor)", "ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) # set defaults airframe", "calculateGradeTailhooker(curList) def calculatePilotRow(data, name, ruleset): #print(name) boardRow = []; uniqueDates = [] for", "= 
tb.add_cell(row_idx,1,width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(size=\"6.0\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) col_idx = 2 for ij in rd:", "# maxDate = data[0]['ServerDate'] #except: # minDate ='' # maxDate = '' textcolor", "import sys, getopt from oauth2client.service_account import ServiceAccountCredentials #from datetime import datetime #print ('Number", "color def calculateGradeTailhooker(curList): # loop through their grades and find their FIRST wire", "pilotRows: if len(i) > maxLength: maxLength = len(i) if maxLength < 17: maxLength", "cell.set_text_props(fontproperties=FontProperties(size=\"6.0\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) col_idx = 2 for ij in rd: color = ij['bg']", "color = yellowcolor elif g == 4.0: color = greencolor elif g ==", "='' # maxDate = '' textcolor = '#000000' edgecolor = '#708090' cell =", "#print(boardRow) return boardRow def CalculateAverageScore(pilotRow): score = 0.0 for i in pilotRow: score", "cell.set_edgecolor(edgecolor) # #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) def", "if len(sys.argv) >= 3: ruleset = str(sys.argv[2]) if len(sys.argv) >= 4: squadron =", "in rd: color = ij['bg'] if not color: color = blankcell text =", "for i in pilotRow: score = score + i['score'] finalscore = score/len(pilotRow) #print(finalscore)", "avg = CalculateAverageScore(rd) scoreText = round(avg,1) if name.lower() == 'eese': name = \"SippyCup\"", "= round(avg,1) if name.lower() == 'eese': name = \"SippyCup\" cell = tb.add_cell(row_idx,0,8*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none'", "gradeCell['icon']: gradeCell['icon'] += '2' try: tmp = 
float(i['points']) if tmp > pt: pt", "titlestr = ' '+options['squadron'] count = 0 for col_idx in range(2,options['maxCols']+2): text =", "tb = Table(ax, bbox=[0, 0, 1, 1]) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows =", "return print('Local HypeMan LSO grade database updated from Google Sheets.') def getModificationTimeSeconds(path): ct", "pilotRows[name] #avg = statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText = round(avg,1) if name.lower() ==", "name == i['pilot']: if i['ServerDate'] not in uniqueDates: uniqueDates.append(i['ServerDate']) for i in uniqueDates:", "gradeCell['score'] = i['points'] else: gradeCell['score'] = i['finalscore'] pts.append(i['points']) gradeCell['bg'] = colorFromPoints(min(pts)) if i['case']", "if curDate == i['ServerDate']: # curList.append(i) # # else: # curDate = i['ServerDate']", "[]; uniqueDates = [] for i in reversed(data): #grade = grade0 if name", "print('... size of data array: ' , str(len(data))) count = 0 if airframe", "in range(2,options['maxCols']+2): text = '' if count < len(titlestr): text = titlestr[count] count", "height, text=text.upper(), loc='center', facecolor=blankcell) cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') #titlestr = 'JOW", "= datetime.now().month print('skipping landings not in current month') for i in data: #print(i)", "= tb.add_cell(0,1,width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonthSQ = datetime.now().month titlestr", "outfile) except: print('Exception thrown in updateFromGoogle') return print('Local HypeMan LSO grade database updated", "= count + 1 
cell = tb.add_cell(0, col_idx, width, height, text=text, loc='center', facecolor=blankcell)", "idate = i['ServerDate'].split('/') imonth = int(idate[1]) if imonth == currentMonth: data2.append(i) data =", "if squadron == '': plotDefaultBoard(pilotRows, options) else: options['airframe'] = airframe options['squadron'] = squadron", "'3' in g['icon']: text = case3 elif '5' in g['icon']: text = anchor", "0 for col_idx in range(2,options['maxCols']+2): text = '' if count < len(titlestr): text", "color = greencolor elif g == 5: color = greencolor elif g ==", "continue if not i['wire']: #print('Empty.') pts.append(i['points']) else: #print('not empty') if not i['finalscore']: gradeCell['score']", "through and keep only a specified airframe data2 = data print('... size of", "for i in data: #print(i) idate = i['ServerDate'].split('/') imonth = int(idate[1]) if imonth", "the remaining cells to the end for f in range(col_idx,maxLength+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color)", "= '•' case3= '◉' case2 = '⊙' case2 = '○' #case2 = '○'", "= blankcell text = '' if '3' in ij['icon']: text = case3 elif", "'': data2 = [] print('Searching for squadron: ' , squadron) for i in", "updateFromGoogle() else: print('Less than one hour since last refresh, skipping pull from google.')", "== 3 and not '3' in gradeCell['icon']: gradeCell['icon'] += '3' if i['case'] ==", "= Table(ax, bbox=[0, 0, 1, 1]) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows = len(pilots)+1", "', i['airframe'], ' was looking for: ' , airframe) # data.remove(i) count =", "data.remove(i) count = count + 1 print('Number of rows kept: ', str(count)) data", "1) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows = len(pilots)+1 width, height = 100 /", "i['pilot'] #print('Name: ' , name) name = name.replace('-', '') name = name.replace('_', '')", "# set defaults airframe = '' squadron = '' ruleset = 'best' #print('Length", "# try: # minDate = data[-1]['ServerDate'] # 
maxDate = data[0]['ServerDate'] # except: #", "2.5: color=bluecolor elif g == 3.0: color = yellowcolor elif g == 4.0:", "data[0]['ServerDate'] # except: # minDate ='' # maxDate = '' textcolor = '#FFFFF0'", "' to ' + maxDate minRows = len(pilots) if minRows < options['maxRows']: minRows", "'5' in g['icon']: text = anchor elif '2' in g['icon']: text = case2", "= 'data.txt' updateDatabase(lsoData) with open('data.txt') as json_file: data = json.load(json_file) # go through", "str(sys.argv[1]) == 'scooter': airframe = 'A-4E-C' elif str(sys.argv[1]) == 'harrier': airframe = 'AV8BNA'", "1 #print(' Calculate grade iteration: ', count) # skip WOFDS if 'WOFD' in", "j in data: if name == j['pilot'] and j['ServerDate'] == i: curList.append(j) ithPilotGrade", "in g['icon']: text = shithot elif '3' in g['icon'] and '5' in g['icon']:", "int(idate[1]) if imonth == currentMonth: data2.append(i) data = data2 if squadron != '':", "options = {} if squadron == '': plotDefaultBoard(pilotRows, options) else: options['airframe'] = airframe", "== '': currentMonth = datetime.now().month print('skipping landings not in current month') for i", "currentMonthSQ = datetime.now().month titlestr = ' '+options['squadron'] count = 0 for col_idx in", "modification_time) except OSError: print(\"Path '%s' does not exists or is inaccessible\" %path) return", "return calculateGradeTailhooker(curList) def calculatePilotRow(data, name, ruleset): #print(name) boardRow = []; uniqueDates = []", "return gradeCell if len(pts) == 0: gradeCell['score'] = 1 pts.append(1) else: gradeCell['score'] =", "from google.') def updateFromGoogle(): try: scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive'] creds = ServiceAccountCredentials.from_json_keyfile_name('HypeManLSO-358d4493fc1d.json', scope) client", "len(pilots): name = pilots[p_idx] rd = pilotRows[name] #avg = statistics.mean(rd) avg = CalculateAverageScore(rd)", "shithot elif '3' in g['icon'] and '5' in g['icon']: text = 
goldstar elif", "import ServiceAccountCredentials #from datetime import datetime #print ('Number of arguments:', len(sys.argv), 'arguments.') #print", "updateDatabase(lsoData) with open('data.txt') as json_file: data = json.load(json_file) # go through and keep", "if maxLength < options['maxRows']: maxLength = options['maxRows'] fig = plt.figure(figsize=(6, 3), dpi=250) ax", ", pilotRow) pilotRows[name] = (pilotRow) # pilotDict[name]=pilotRow options = {} if squadron ==", "count) # skip WOFDS if 'WOFD' in i['grade']: continue if not i['wire']: #print('Empty.')", "i['score'] finalscore = score/len(pilotRow) #print(finalscore) return finalscore def plotSquadron(pilotRows, options): #print('PlotSquadron') maxLength =", "!= '': data2 = [] print('Searching for squadron: ' , squadron) for i", "in g['icon']: text = anchor elif '2' in g['icon']: text = case2 cell", "'') name = name.lower() index = name.find(squadron) if index != -1: data2.append(i) count", "= 2 for g in rd: color = g['bg'] text = '' if", "# # else: # curDate = i['ServerDate'] # grade = calculateGrade(curList, grade0) #", "# except: # minDate ='' # maxDate = '' textcolor = '#FFFFF0' edgecolor", "name.find(squadron) if index != -1: data2.append(i) count = count + 1; #print('Keeping in", "ruleset) lsoData = 'data.txt' updateDatabase(lsoData) with open('data.txt') as json_file: data = json.load(json_file) #", "# name = name.replace(squadron,'') # if the squadron was empty just keep the", "index = name.find(squadron) if index != -1: data2.append(i) count = count + 1;", "gradeCell['icon'] += '2' try: tmp = float(i['points']) if tmp > pt: pt =", "data2 for i in reversed(data): name = i['pilot'] if name not in pilots:", "= gspread.authorize(creds) sheet = client.open('HypeMan_LSO_Grades').sheet1 list_of_hashes = sheet.get_all_records() with open('data.txt', 'w') as outfile:", "g == 3.0: color = yellowcolor elif g == 4.0: color = greencolor", "= '#000000' edgecolor = '#708090' cell = 
tb.add_cell(0,0,10*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5)", "except: # minDate ='' # maxDate = '' textcolor = '#FFFFF0' edgecolor =", "text='' # add the remaining cells to the end for f in range(col_idx,options['maxCols']+2):", "greencolor = '#0bab35' # bluecolor = '#01A2EA' #try: # minDate = data[-1]['ServerDate'] #", "in data: # if i['airframe'] #if i['airframe'] == airframe: if i['airframe'] in airframe:", "data: if not'WOFD' in i['grade']: data2.append(i) data = data2 print('Number remaining: ', str(len(data)))", "== i['pilot']: if i['ServerDate'] not in uniqueDates: uniqueDates.append(i['ServerDate']) for i in uniqueDates: #print(i)", "len(sys.argv) >= 4: squadron = str(sys.argv[3]); print('Squadron: ', squadron) print('Ruleset: ', ruleset) lsoData", "cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonthSQ = datetime.now().month titlestr = ' '+options['squadron']", "in range(col_idx,options['maxCols']+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) # #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off')", "cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) # name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(size=\"6.0\"))", "elif '5' in g['icon']: text = anchor elif '2' in g['icon']: text =", "ij in 
rd: color = ij['bg'] if not color: color = blankcell text", "== j['pilot'] and j['ServerDate'] == i: curList.append(j) ithPilotGrade = calculateGrade(curList,ruleset) boardRow.append(ithPilotGrade) # if", "if minRows < options['maxRows']: minRows = options['maxRows'] #for p_idx in range(0,len(pilots)): for p_idx", "pt=0 gradeCell['bg'] = colorFromPoints(pt) gradeCell['score'] = pt # if not gradeCell['score']: # print('what')", "print('Argument Number 2: ', str(sys.argv[1])) def updateDatabase(path): if not os.path.isfile(path) or time.time() -", "print('what') return gradeCell def colorFromPoints(g): bluegraycolor = '#708286' glossgraycolor = '#5F615E' redcolor =", "time.time() try: modification_time = os.path.getmtime(path) #print(\"Last modification time since the epoch:\", modification_time) except", "bluegraycolor = '#708286' glossgraycolor = '#5F615E' redcolor = '#ED1B24' browncolor = '#835C3B' orangecolor", "== '': plotDefaultBoard(pilotRows, options) else: options['airframe'] = airframe options['squadron'] = squadron options['ruleset'] =", "for: ' , airframe) # data.remove(i) count = count + 1 print('Number of", "= blankcell return color def calculateGradeTailhooker(curList): # loop through their grades and find", "calculateGrade(curList,ruleset) boardRow.append(ithPilotGrade) # if not haveDate: # curDate = i['ServerDate'] # haveDate =", "name = name.lower() index = name.find(squadron) if index != -1: data2.append(i) count =", "in i['grade']: data2.append(i) data = data2 print('Number remaining: ', str(len(data))) pilots = []", "in g['icon']: text = case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"14\"))", "maxDate = data[0]['ServerDate'] #except: # minDate ='' # maxDate = '' textcolor =", "for i in uniqueDates: #print(i) curList = []; for j in data: if", "import 
FontProperties import numpy as np import statistics import sys, getopt from oauth2client.service_account", "= case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"14\")) cell.set_edgecolor(edgecolor) col_idx =", "n_cols, 100.0 / n_rows #height = height/10 shithot ='🎖️' anchor='⚓' goldstar = '⭐'", "= calculateGrade(curList,ruleset) boardRow.append(ithPilotGrade) # if not haveDate: # curDate = i['ServerDate'] # haveDate", "= len(pilots) if minRows < options['maxRows']: minRows = options['maxRows'] #for p_idx in range(0,len(pilots)):", "= name.replace('[', '') name = name.replace(']', '') name = name.replace('|', '') name =", "= '#5F615E' redcolor = '#ED1B24' browncolor = '#835C3B' orangecolor = '#d17a00' yellowcolor =", "return modification_time def calculateGradeCivilian(curList): gradeCell = {} gradeCell['score'] = -1 gradeCell['icon'] = ''", "ruleset): #print(name) boardRow = []; uniqueDates = [] for i in reversed(data): #grade", "and find their FIRST wire gradeCell = {} gradeCell['score'] = 1 gradeCell['icon'] =", "remaining: ', str(len(data))) pilots = [] pilotRows = {} pilotDict = {} #", "pilotRows[name] # avg = statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText = round(avg,1) cell =", "add the remaining cells to the end for f in range(col_idx,maxLength+2): cell =", "in gradeCell['icon']: gradeCell['icon'] += '3' if i['case'] == 2 and not '2' in", "boardRow = []; uniqueDates = [] for i in reversed(data): #grade = grade0", "data2 if squadron != '': currentMonthSQ = datetime.now().month print('skipping landings not in current", "= {} if squadron == '': plotDefaultBoard(pilotRows, options) else: options['airframe'] = airframe options['squadron']", "pilotRow: score = score + i['score'] finalscore = score/len(pilotRow) #print(finalscore) return finalscore def", "= 
len(pilots)+1 width, height = 100 / n_cols, 100.0 / n_rows shithot ='🎖️'", "i['airframe'] in airframe: data2.append(i) # print('Deleting airframe: ', i['airframe'], ' was looking for:", "data: if name == j['pilot'] and j['ServerDate'] == i: curList.append(j) ithPilotGrade = calculateGrade(curList,ruleset)", "data[0]['ServerDate'] #except: # minDate ='' # maxDate = '' textcolor = '#000000' edgecolor", "'#708286','#5F615E'] redcolor = '#a00000' bluegraycolor = '#708286' glossgraycolor = '#5F615E' browncolor = '#835C3B'", "str(currentMonth) + '/' + str(datetime.now().year) print(titlestr) count = 0 for col_idx in range(2,maxLength+2):", "if ruleset == 'best': return calculateGradeCivilian(curList) if ruleset == 'first': return calculateGradeTailhooker(curList) def", "#edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonthSQ = datetime.now().month titlestr = '", "in pilots: pilots.append(name) pilotRow = calculatePilotRow(data, name, ruleset) #print(name,' score: ' , pilotRow)", "for i in data: # if i['airframe'] #if i['airframe'] == airframe: if i['airframe']", "i: curList.append(j) ithPilotGrade = calculateGrade(curList,ruleset) boardRow.append(ithPilotGrade) # if not haveDate: # curDate =", "maxLength = 17 fig = plt.figure(dpi=150) ax = fig.add_subplot(1,1,1) frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([])", "color=bluegraycolor elif g == 0: color=blackcolor elif g == 1: color=redcolor elif g", "gradeCell['score'] = 1 gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF' pts = [] count", "3.0: color = yellowcolor elif g == 4.0: color = greencolor elif g", "#edgecolor='none' cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) # #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) 
#plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight',", "color=browncolor elif g == 2.5: color=bluecolor elif g == 3.0: color = yellowcolor", "Sheets.') def getModificationTimeSeconds(path): ct = time.time() try: modification_time = os.path.getmtime(path) #print(\"Last modification time", "' + maxDate minRows = len(pilots) if minRows < options['maxRows']: minRows = options['maxRows']", "def plotDefaultBoard(pilotRows, options): maxLength = 0 for i in pilotRows: if len(i) >", "gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF' pts = [] count = 0 for", "print('Deleting airframe: ', i['airframe'], ' was looking for: ' , airframe) # data.remove(i)", "default grade #grade0={}; grade0['color']='white'; grade0['score']=0.0; grade0['symbol']='x'; grade0['grade']='--' # if squadron is empty then", "> maxLength: maxLength = len(i) if maxLength < 17: maxLength = 17 fig", ">= 2: if str(sys.argv[1]) == 'turkey': airframe = ['F-14B', 'F-14A-135-GR'] elif str(sys.argv[1]) ==", "i['grade']: data2.append(i) data = data2 print('Number remaining: ', str(len(data))) pilots = [] pilotRows", "= Table(ax, bbox=[0, 0, 1, 1]) #tb.scale(0.25, 1) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows", "return gradeCell def calculateGrade(curList, ruleset): if ruleset == 'best': return calculateGradeCivilian(curList) if ruleset", "case3 elif '2' in ij['icon']: text = case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412')", "name = pilots[p_idx] rd = pilotRows[name] # avg = statistics.mean(rd) avg = CalculateAverageScore(rd)", "'') name = name.replace('[', '') name = name.replace(']', '') name = name.replace('|', '')", "#tb.scale(0.25, 1) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows = len(pilots)+1 width, height = 100", "curList.append(i) #print(boardRow) return boardRow def CalculateAverageScore(pilotRow): score = 0.0 for i in pilotRow:", 
"print('Exception thrown in updateFromGoogle') return print('Local HypeMan LSO grade database updated from Google", "i in data: # if i['airframe'] #if i['airframe'] == airframe: if i['airframe'] in", "gradeCell['score'] = pt # if not gradeCell['score']: # print('what') return gradeCell def colorFromPoints(g):", "rd: color = g['bg'] text = '' if '5.5' in g['icon']: text =", "[] print('Keeping only grades for airframe: ', airframe) for i in data: #", "minDate = data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] # except: # minDate ='' #", "'scooter': airframe = 'A-4E-C' elif str(sys.argv[1]) == 'harrier': airframe = 'AV8BNA' elif str(sys.argv[1])", "= count + 1 cell = tb.add_cell(0, col_idx, width, height, text=text.upper(), loc='center', facecolor=blankcell)", "gradeCell['bg'] = '#FFFFFF' pt = float(-1.0) for i in curList: if i['case'] ==", "'#FFFFFF' pts = [] count = 0 for i in curList: count =", "str(sys.argv[1]) == 'turkey': airframe = ['F-14B', 'F-14A-135-GR'] elif str(sys.argv[1]) == 'hornet': airframe =", "= pilots[p_idx] rd = pilotRows[name] #avg = statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText =", "cell.set_edgecolor(edgecolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonth = datetime.now().month titlestr = ' JOINT OPS WING'", "gradeCell if len(pts) == 0: gradeCell['score'] = 1 pts.append(1) else: gradeCell['score'] = statistics.mean(pts)", "of argv: ' , len(sys.argv)); if len(sys.argv) >= 2: if str(sys.argv[1]) == 'turkey':", "grades for airframe: ', airframe) for i in data: # if i['airframe'] #if", "array: ' , str(len(data))) count = 0 if airframe != '': data2 =", "[] count = 0 for i in curList: count = count + 1", "cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.4\")) cell.set_edgecolor(edgecolor) col_idx = 2 for g in rd: color = g['bg'] text", "from oauth2client.service_account import ServiceAccountCredentials #from 
datetime import datetime #print ('Number of arguments:', len(sys.argv),", "i in data: if not'WOFD' in i['grade']: data2.append(i) data = data2 print('Number remaining:", "data print('... size of data array: ' , str(len(data))) count = 0 if", "= '#01A2EA' #try: # minDate = data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] #except: #", "except: print('Exception thrown in updateFromGoogle') return print('Local HypeMan LSO grade database updated from", "only grades for airframe: ', airframe) for i in data: # if i['airframe']", "import gspread import json import os import time from datetime import datetime import", "= '⊙' case2 = '○' #case2 = '○' #case2 = '∘' #unicorn='✈️' blankcell='#FFFFFF'", "plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) # set defaults airframe = '' squadron = '' ruleset =", "if squadron is empty then lets trim the landings not in the current", "[] name = '' scoreText = '' if p_idx < len(pilots): name =", "grade iteration: ', count) # skip WOFDS if 'WOFD' in i['grade']: continue if", "= score + i['score'] finalscore = score/len(pilotRow) #print(finalscore) return finalscore def plotSquadron(pilotRows, options):", "#078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718', '#708286','#5F615E'] redcolor = '#a00000' bluegraycolor = '#708286' glossgraycolor = '#5F615E'", "count + 1 cell = tb.add_cell(0, col_idx, width, height, text=text, loc='center', facecolor=blankcell) cell.set_edgecolor(edgecolor)", "database updated from Google Sheets.') def getModificationTimeSeconds(path): ct = time.time() try: modification_time =", "if name == i['pilot']: if i['ServerDate'] not in uniqueDates: uniqueDates.append(i['ServerDate']) for i in", "# yellowcolor = '#b6c700' # greencolor = '#0bab35' # bluecolor = '#01A2EA' #try:", "tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' 
cell.get_text().set_color('#333412') # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"14\")) cell.set_edgecolor(edgecolor) col_idx = col_idx + 1 color", "tmp if tmp == 5 and not '5' in gradeCell['icon']: gradeCell['icon']+= '5' except:", "height/10 shithot ='🎖️' anchor='⚓' goldstar = '⭐' goldstar = '★' case3 = '•'", "<reponame>robscallsign/HypeMan import gspread import json import os import time from datetime import datetime", "tb.add_cell(0,0,10*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) #cell.set_fontsize(24) cell = tb.add_cell(0,1,width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor)", "gradeCell['score'] = statistics.mean(pts) gradeCell['bg'] = colorFromPoints(min(pts)) return gradeCell def calculateGrade(curList, ruleset): if ruleset", "Number 2: ', str(sys.argv[1])) def updateDatabase(path): if not os.path.isfile(path) or time.time() - getModificationTimeSeconds(path)", "', str(count)) data = data2 print('size of data array: ' , str(len(data))) count", "gradeCell = {} gradeCell['score'] = -1 gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF' pt", "uniqueDates: uniqueDates.append(i['ServerDate']) for i in uniqueDates: #print(i) curList = []; for j in", "= name.replace('-', '') name = name.replace('_', '') name = name.replace('[', '') name =", "= ['F-14B', 'F-14A-135-GR'] elif str(sys.argv[1]) == 'hornet': airframe = 'FA-18C_hornet' elif str(sys.argv[1]) ==", "scoreText = round(avg,1) if name.lower() == 'eese': name = \"SippyCup\" cell = tb.add_cell(row_idx,0,8*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue')", "in reversed(data): name = i['pilot'] if name not in pilots: pilots.append(name) pilotRow =", 
"2: ', str(sys.argv[1])) def updateDatabase(path): if not os.path.isfile(path) or time.time() - getModificationTimeSeconds(path) >", "#edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) # name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor)", "iteration: ', count) # skip WOFDS if 'WOFD' in i['grade']: continue if not", "not '2' in gradeCell['icon']: gradeCell['icon'] += '2' try: tmp = float(i['points']) if tmp", "str(len(data))) count = 0 if squadron != '': data2 = [] print('Searching for", "#print ('Number of arguments:', len(sys.argv), 'arguments.') #print ('Argument List:', str(sys.argv)) #if len(sys.argv)== 2:", "empty then lets trim the landings not in the current month data2 =", "ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) def plotDefaultBoard(pilotRows, options): maxLength", "elif '2' in ij['icon']: text = case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') cell.set_linewidth(0.5)", "= pilots[p_idx] rd = pilotRows[name] # avg = statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText", "{} gradeCell['score'] = 1 gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF' pts = []", "= options['maxRows'] #for p_idx in range(0,len(pilots)): for p_idx in range(0,minRows): row_idx = p_idx+1", "0, 1, 1]) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows = len(pilots)+1 width, height =", "'#a00000' bluegraycolor = '#708286' glossgraycolor = '#5F615E' browncolor = '#835C3B' orangecolor = '#d17a00'", "i['ServerDate'] # haveDate = True # # if curDate == 
i['ServerDate']: # curList.append(i)", "empty just keep the original data data = data2 data2 = [] print('Skipping", "curList.append(j) ithPilotGrade = calculateGrade(curList,ruleset) boardRow.append(ithPilotGrade) # if not haveDate: # curDate = i['ServerDate']", "# data.remove(i) count = count + 1 print('Number of rows kept: ', str(count))", "= '★' case3 = '•' case3= '◉' case2 = '⊙' case2 = '○'", "in squadron: ' , name) # name = name.replace(squadron,'') # if the squadron", "name.replace('-', '') name = name.replace('_', '') name = name.replace('[', '') name = name.replace(']',", "= count + 1 #print(' Calculate grade iteration: ', count) # skip WOFDS", "the current month data2 = [] if squadron == '': currentMonth = datetime.now().month", "= bluegraycolor else: color = blankcell return color def calculateGradeTailhooker(curList): # loop through", "current month data2 = [] if squadron == '': currentMonth = datetime.now().month print('skipping", "not gradeCell['score']: # print('what') return gradeCell def colorFromPoints(g): bluegraycolor = '#708286' glossgraycolor =", "name.replace('\\\\', '') name = name.replace('/', '') name = name.replace('@', '') name = name.lower()", "if ruleset == 'first': return calculateGradeTailhooker(curList) def calculatePilotRow(data, name, ruleset): #print(name) boardRow =", "'' if p_idx < len(pilots): name = pilots[p_idx] rd = pilotRows[name] # avg", "in our Greenie Board # set the default grade #grade0={}; grade0['color']='white'; grade0['score']=0.0; grade0['symbol']='x';", "not '5' in gradeCell['icon']: gradeCell['icon']+= '5' except: pt=0 gradeCell['bg'] = colorFromPoints(pt) gradeCell['score'] =", "print('Local HypeMan LSO grade database updated from Google Sheets.') def getModificationTimeSeconds(path): ct =", "name.replace('/', '') name = name.replace('@', '') name = name.lower() index = name.find(squadron) if", "gradeCell def calculateGrade(curList, ruleset): if ruleset == 'best': return calculateGradeCivilian(curList) if 
ruleset ==", "print('Updating from Google.') updateFromGoogle() else: print('Less than one hour since last refresh, skipping", "creds = ServiceAccountCredentials.from_json_keyfile_name('HypeManLSO-358d4493fc1d.json', scope) client = gspread.authorize(creds) sheet = client.open('HypeMan_LSO_Grades').sheet1 list_of_hashes = sheet.get_all_records()", "in data: #print(i) idate = i['ServerDate'].split('/') imonth = int(idate[1]) if imonth == currentMonthSQ:", "curList: count = count + 1 #print(' Calculate grade iteration: ', count) #", "else: gradeCell['score'] = i['finalscore'] pts.append(i['points']) gradeCell['bg'] = colorFromPoints(min(pts)) if i['case'] == 3: gradeCell['icon']+='3'", "= {} gradeCell['score'] = 1 gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF' pts =", "= '⊙' case2 = '○' #case2 = '○' #case2 = '∘' #unicorn='✈️' blankcell='#1A392A'", "pts.append(i['points']) gradeCell['bg'] = colorFromPoints(min(pts)) if i['case'] == 3: gradeCell['icon']+='3' return gradeCell if len(pts)", "will appear in our Greenie Board # set the default grade #grade0={}; grade0['color']='white';", "{} # get the rows as they will appear in our Greenie Board", "'3' in ij['icon']: text = case3 elif '2' in ij['icon']: text = case2", "str(sys.argv[2]) if len(sys.argv) >= 4: squadron = str(sys.argv[3]); print('Squadron: ', squadron) print('Ruleset: ',", "try: # minDate = data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] # except: # minDate", "curDate = i['ServerDate'] # haveDate = True # # if curDate == i['ServerDate']:", "name = name.replace('_', '') name = name.replace('[', '') name = name.replace(']', '') name", "ithPilotGrade = calculateGrade(curList,ruleset) boardRow.append(ithPilotGrade) # if not haveDate: # curDate = i['ServerDate'] #", "= 0 for col_idx in range(2,options['maxCols']+2): text = '' if count < len(titlestr):", "= 0 for col_idx in range(2,maxLength+2): text = '' if count < len(titlestr):", "= 
['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive'] creds = ServiceAccountCredentials.from_json_keyfile_name('HypeManLSO-358d4493fc1d.json', scope) client = gspread.authorize(creds) sheet = client.open('HypeMan_LSO_Grades').sheet1 list_of_hashes", "'JOW Greenie Board ' + minDate + ' to ' + maxDate minRows", "'FA-18C_hornet' elif str(sys.argv[1]) == 'scooter': airframe = 'A-4E-C' elif str(sys.argv[1]) == 'harrier': airframe", "print('Searching for squadron: ' , squadron) for i in data: name = i['pilot']", "'#FFFFF0' edgecolor = '#708090' cell = tb.add_cell(0,0,8*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) cell", "cell = tb.add_cell(row_idx,1,2*width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.4\")) cell.set_edgecolor(edgecolor) col_idx = 2 for g in rd:", "g == -1: color=bluegraycolor elif g == 0: color=blackcolor elif g == 1:", "for i in curList: count = count + 1 #print(' Calculate grade iteration:", "== 'goshawk': airframe = 'T-45' print('Aircraft: ', airframe) if len(sys.argv) >= 3: ruleset", "'#FFFFFF' pt = float(-1.0) for i in curList: if i['case'] == 3 and", "0, 1, 1]) #tb.scale(0.25, 1) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows = len(pilots)+1 width,", "# minDate ='' # maxDate = '' textcolor = '#FFFFF0' edgecolor = '#708090'", "+= '3' if i['case'] == 2 and not '2' in gradeCell['icon']: gradeCell['icon'] +=", "= '' gradeCell['bg'] = '#FFFFFF' pts = [] count = 0 for i", "lsoData = 'data.txt' updateDatabase(lsoData) with open('data.txt') as json_file: data = json.load(json_file) # go", "cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) 
cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') #titlestr = 'JOW Greenie Board ' + minDate", "for squadron: ' , squadron) for i in data: name = i['pilot'] #print('Name:", "+ i['score'] finalscore = score/len(pilotRow) #print(finalscore) return finalscore def plotSquadron(pilotRows, options): #print('PlotSquadron') maxLength", "width, height = 100 / n_cols, 100.0 / n_rows shithot ='🎖️' anchor='⚓' goldstar", "'#708090' cell = tb.add_cell(0,0,10*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) #cell.set_fontsize(24) cell = tb.add_cell(0,1,width,height,text='',loc='center',facecolor=blankcell)", "= '' if p_idx < len(pilots): name = pilots[p_idx] rd = pilotRows[name] #", "redcolor = '#a00000' bluegraycolor = '#708286' glossgraycolor = '#5F615E' browncolor = '#835C3B' orangecolor", "to the end for f in range(col_idx,options['maxCols']+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor)", "maxLength < 17: maxLength = 17 fig = plt.figure(dpi=150) ax = fig.add_subplot(1,1,1) frame1", "' , airframe) # data.remove(i) count = count + 1 print('Number of rows", "#print('not empty') if not i['finalscore']: gradeCell['score'] = i['points'] else: gradeCell['score'] = i['finalscore'] pts.append(i['points'])", "blankcell text = '' if '3' in ij['icon']: text = case3 elif '2'", "import statistics import sys, getopt from oauth2client.service_account import ServiceAccountCredentials #from datetime import datetime", "cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) cell = 
tb.add_cell(0,1,2*width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonth", "if i['case'] == 2 and not '2' in gradeCell['icon']: gradeCell['icon'] += '2' try:", "scoreText = '' if p_idx < len(pilots): name = pilots[p_idx] rd = pilotRows[name]", "cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) # name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(size=\"6.0\")) cell.set_edgecolor(edgecolor)", "list_of_hashes = sheet.get_all_records() with open('data.txt', 'w') as outfile: json.dump(list_of_hashes, outfile) except: print('Exception thrown", "ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) def plotDefaultBoard(pilotRows, options): maxLength = 0", "#print(i) idate = i['ServerDate'].split('/') imonth = int(idate[1]) if imonth == currentMonthSQ: data2.append(i) data", "thrown in updateFromGoogle') return print('Local HypeMan LSO grade database updated from Google Sheets.')", "calculateGradeTailhooker(curList): # loop through their grades and find their FIRST wire gradeCell =", "maxLength = len(i) if maxLength < 17: maxLength = 17 fig = plt.figure(dpi=150)", "datetime.now().month titlestr = ' '+options['squadron'] count = 0 for col_idx in range(2,options['maxCols']+2): text", "'%s' does not exists or is inaccessible\" %path) return ct-4000 return modification_time def", "-1: data2.append(i) count = count + 1; #print('Keeping in squadron: ' , name)", "for i in data: name = 
i['pilot'] #print('Name: ' , name) name =", "calculateGrade(curList, ruleset): if ruleset == 'best': return calculateGradeCivilian(curList) if ruleset == 'first': return", "= pilots[p_idx]; cell = tb.add_cell(row_idx,1,2*width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.4\")) cell.set_edgecolor(edgecolor) col_idx = 2 for g", "#print ('Argument List:', str(sys.argv)) #if len(sys.argv)== 2: # print('Argument Number 2: ', str(sys.argv[1]))", "'' if '5.5' in g['icon']: text = shithot elif '3' in g['icon'] and", "j['pilot'] and j['ServerDate'] == i: curList.append(j) ithPilotGrade = calculateGrade(curList,ruleset) boardRow.append(ithPilotGrade) # if not", "plotDefaultBoard(pilotRows, options) else: options['airframe'] = airframe options['squadron'] = squadron options['ruleset'] = ruleset options['maxRows']=10", "Google Sheets.') def getModificationTimeSeconds(path): ct = time.time() try: modification_time = os.path.getmtime(path) #print(\"Last modification", "name not in pilots: pilots.append(name) pilotRow = calculatePilotRow(data, name, ruleset) #print(name,' score: '", "import Table from matplotlib.font_manager import FontProperties import numpy as np import statistics import", "'' scoreText = '' if p_idx < len(pilots): name = pilots[p_idx] rd =", "cell.get_text().set_color('#333412') cell.set_linewidth(0.5) # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"10\")) cell.set_edgecolor(edgecolor) col_idx = col_idx + 1 color =", "data data = data2 data2 = [] print('Skipping WOFDs') for i in data:", "colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718'] # redcolor = '#a00000' # browncolor = '#835C3B' # orangecolor = '#d17a00'", "g['icon']: text = case3 elif '5' in g['icon']: text = anchor elif '2'", "a specified airframe data2 = data print('... 
size of data array: ' ,", "= 2 for ij in rd: color = ij['bg'] if not color: color", "boardRow def CalculateAverageScore(pilotRow): score = 0.0 for i in pilotRow: score = score", "orangecolor = '#d17a00' # yellowcolor = '#b6c700' # greencolor = '#0bab35' # bluecolor", "data = json.load(json_file) # go through and keep only a specified airframe data2", "browncolor = '#835C3B' orangecolor = '#d17a00' yellowcolor = '#b6c700' greencolor = '#0bab35' #", "('Argument List:', str(sys.argv)) #if len(sys.argv)== 2: # print('Argument Number 2: ', str(sys.argv[1])) def", "cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') #titlestr = 'JOW Greenie Board ' +", "for i in pilotRows: if len(i) > maxLength: maxLength = len(i) if maxLength", "ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) # set defaults airframe =", "data2.append(i) # print('Deleting airframe: ', i['airframe'], ' was looking for: ' , airframe)", "-1 gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF' pt = float(-1.0) for i in", "= '○' #case2 = '∘' #unicorn='✈️' blankcell='#1A392A' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718', '#708286','#5F615E'] redcolor", "= grade0 if name == i['pilot']: if i['ServerDate'] not in uniqueDates: uniqueDates.append(i['ServerDate']) for", "' , name) name = name.replace('-', '') name = name.replace('_', '') name =", "range(col_idx,options['maxCols']+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_linewidth(0.5) 
cell.set_edgecolor(edgecolor) # #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False)", "# # if curDate == i['ServerDate']: # curList.append(i) # # else: # curDate", "#print(finalscore) return finalscore def plotSquadron(pilotRows, options): #print('PlotSquadron') maxLength = 0 for i in", "plt import matplotlib.colors as mcolors from matplotlib.table import Table from matplotlib.font_manager import FontProperties", "airframe = 'AV8BNA' elif str(sys.argv[1]) == 'goshawk': airframe = 'T-45' print('Aircraft: ', airframe)", "== 2.0: color=browncolor elif g == 2.5: color=bluecolor elif g == 3.0: color", "bluecolor = '#01A2EA' #try: # minDate = data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] #except:", "yellowcolor elif g == 4.0: color = greencolor elif g == 4.5: color", "'•' case3= '◉' case2 = '⊙' case2 = '○' #case2 = '○' #case2", "'#b6c700' # greencolor = '#0bab35' # bluecolor = '#01A2EA' #try: # minDate =", "'#0bab35' bluecolor = '#01A2EA' blankcell='#FFFFFF' blackcolor = '#000000' color = 'blankcell' if g", "haveDate = True # # if curDate == i['ServerDate']: # curList.append(i) # #", "# redcolor = '#a00000' # browncolor = '#835C3B' # orangecolor = '#d17a00' #", "'harrier': airframe = 'AV8BNA' elif str(sys.argv[1]) == 'goshawk': airframe = 'T-45' print('Aircraft: ',", "cell = tb.add_cell(0,1,2*width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonth = datetime.now().month titlestr", "= name.replace('@', '') name = name.lower() index = name.find(squadron) if index != -1:", "= {} pilotDict = {} # get the rows as they will appear", "greencolor elif g == 5: color = greencolor elif g == 5.5: color", "pts.append(i['points']) else: #print('not empty') if not i['finalscore']: gradeCell['score'] = i['points'] else: 
gradeCell['score'] =", "data = data2 data2 = [] print('Skipping WOFDs') for i in data: if", "= '∘' #unicorn='✈️' blankcell='#1A392A' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718', '#708286','#5F615E'] redcolor = '#a00000' bluegraycolor", "array: ' , str(len(data))) count = 0 if squadron != '': data2 =", "# if not gradeCell['score']: # print('what') return gradeCell def colorFromPoints(g): bluegraycolor = '#708286'", "= '∘' #unicorn='✈️' blankcell='#FFFFFF' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718'] # redcolor = '#a00000' #", "#+ str(currentMonth) + '/' + str(datetime.now().year) print(titlestr) count = 0 for col_idx in", "'#708286' glossgraycolor = '#5F615E' redcolor = '#ED1B24' browncolor = '#835C3B' orangecolor = '#d17a00'", "1 cell = tb.add_cell(0, col_idx, width, height, text=text.upper(), loc='center', facecolor=blankcell) cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor)", "airframe != '': data2 = [] print('Keeping only grades for airframe: ', airframe)", "= greencolor elif g == 5: color = greencolor elif g == 5.5:", "2 and not '2' in gradeCell['icon']: gradeCell['icon'] += '2' try: tmp = float(i['points'])", "g['icon']: text = case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"14\")) cell.set_edgecolor(edgecolor)", "name = '' scoreText = '' if p_idx < len(pilots): name = pilots[p_idx]", "and j['ServerDate'] == i: curList.append(j) ithPilotGrade = 
calculateGrade(curList,ruleset) boardRow.append(ithPilotGrade) # if not haveDate:", "go through and keep only a specified airframe data2 = data print('... size", "i['wire']: #print('Empty.') pts.append(i['points']) else: #print('not empty') if not i['finalscore']: gradeCell['score'] = i['points'] else:", "range(0,minRows): row_idx = p_idx+1 rd = [] name = '' scoreText = ''", "in curList: count = count + 1 #print(' Calculate grade iteration: ', count)", "if not gradeCell['score']: # print('what') return gradeCell def colorFromPoints(g): bluegraycolor = '#708286' glossgraycolor", "#edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonth = datetime.now().month titlestr = ' JOINT", "n_cols, 100.0 / n_rows shithot ='🎖️' anchor='⚓' goldstar = '⭐' goldstar = '★'", "n_rows shithot ='🎖️' anchor='⚓' goldstar = '⭐' goldstar = '★' case3 = '•'", "cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonthSQ = datetime.now().month titlestr = ' '+options['squadron'] count = 0 for", "grade0) # boardRow.append(grade) # curList = []; # curList.append(i) #print(boardRow) return boardRow def", "in range(2,maxLength+2): text = '' if count < len(titlestr): text = titlestr[count] count", "= name.replace('_', '') name = name.replace('[', '') name = name.replace(']', '') name =", "= pilots[p_idx]; cell = tb.add_cell(row_idx,1,width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(size=\"6.0\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) col_idx = 2 for", "and not '3' in gradeCell['icon']: gradeCell['icon'] += '3' if i['case'] == 2 and", "since the epoch:\", modification_time) except OSError: print(\"Path '%s' does not exists or is", "= {} # get the rows as they will appear in our Greenie", "g['icon']: text = anchor 
elif '2' in g['icon']: text = case2 cell =", "if p_idx < len(pilots): name = pilots[p_idx] rd = pilotRows[name] #avg = statistics.mean(rd)", "range(0,len(pilots)): for p_idx in range(0,minRows): row_idx = p_idx+1 rd = [] name =", "not in current month') for i in data: #print(i) idate = i['ServerDate'].split('/') imonth", "grade database updated from Google Sheets.') def getModificationTimeSeconds(path): ct = time.time() try: modification_time", "i in uniqueDates: #print(i) curList = []; for j in data: if name", "#tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) def plotDefaultBoard(pilotRows, options):", "count + 1 cell = tb.add_cell(0, col_idx, width, height, text=text.upper(), loc='center', facecolor=blankcell) cell.set_linewidth(0.5)", "squadron == '': plotDefaultBoard(pilotRows, options) else: options['airframe'] = airframe options['squadron'] = squadron options['ruleset']", "to the end for f in range(col_idx,maxLength+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_edgecolor(edgecolor) #tb.set_fontsize(7)", "pilotRows = {} pilotDict = {} # get the rows as they will", "updated from Google Sheets.') def getModificationTimeSeconds(path): ct = time.time() try: modification_time = os.path.getmtime(path)", "loc='center', facecolor=blankcell) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') # titlestr = 'JOW Greenie Board", "frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0, 0, 1, 1]) #tb.scale(0.25, 1) tb.auto_set_font_size(False) n_cols", "CalculateAverageScore(pilotRow): score = 0.0 for i in pilotRow: 
score = score + i['score']", "count = count + 1; #print('Keeping in squadron: ' , name) # name", "' + minDate + ' to ' + maxDate minRows = len(pilots) if", "airframe = ['F-14B', 'F-14A-135-GR'] elif str(sys.argv[1]) == 'hornet': airframe = 'FA-18C_hornet' elif str(sys.argv[1])", "== 5 and not '5' in gradeCell['icon']: gradeCell['icon']+= '5' except: pt=0 gradeCell['bg'] =", "'': currentMonth = datetime.now().month print('skipping landings not in current month') for i in", "matplotlib.table import Table from matplotlib.font_manager import FontProperties import numpy as np import statistics", "len(sys.argv) >= 3: ruleset = str(sys.argv[2]) if len(sys.argv) >= 4: squadron = str(sys.argv[3]);", "and keep only a specified airframe data2 = data print('... size of data", "1; #print('Keeping in squadron: ' , name) # name = name.replace(squadron,'') # if", "cell = tb.add_cell(0,0,10*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) #cell.set_fontsize(24) cell = tb.add_cell(0,1,width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none'", "= titlestr[count] count = count + 1 cell = tb.add_cell(0, col_idx, width, height,", "#plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) # set defaults airframe = '' squadron = '' ruleset", "#edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) cell = tb.add_cell(0,1,2*width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor)", "count = count + 1 cell = tb.add_cell(0, 
col_idx, width, height, text=text.upper(), loc='center',", "#edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) #cell.set_fontsize(24) cell = tb.add_cell(0,1,width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5)", "= data2 print('size of data array: ' , str(len(data))) count = 0 if", "options) else: options['airframe'] = airframe options['squadron'] = squadron options['ruleset'] = ruleset options['maxRows']=10 options['maxCols']=17", "#cell.set_text_props(family='') #titlestr = 'JOW Greenie Board ' + minDate + ' to '", "data2 = [] print('Skipping WOFDs') for i in data: if not'WOFD' in i['grade']:", "str(count)) data = data2 print('size of data array: ' , str(len(data))) count =", "= CalculateAverageScore(rd) scoreText = round(avg,1) if name.lower() == 'eese': name = \"SippyCup\" cell", "print('Ruleset: ', ruleset) lsoData = 'data.txt' updateDatabase(lsoData) with open('data.txt') as json_file: data =", "= '#835C3B' orangecolor = '#d17a00' yellowcolor = '#b6c700' greencolor = '#0bab35' bluecolor =", "gradeCell['score'] = i['finalscore'] pts.append(i['points']) gradeCell['bg'] = colorFromPoints(min(pts)) if i['case'] == 3: gradeCell['icon']+='3' return", "= '#b6c700' greencolor = '#0bab35' bluecolor = '#01A2EA' blankcell='#FFFFFF' blackcolor = '#000000' color", "goldstar = '★' case3 = '•' case3= '◉' case2 = '⊙' case2 =", "statistics.mean(pts) gradeCell['bg'] = colorFromPoints(min(pts)) return gradeCell def calculateGrade(curList, ruleset): if ruleset == 'best':", "elif g == 4.5: color = greencolor elif g == 5: color =", "0: gradeCell['score'] = 1 pts.append(1) else: gradeCell['score'] = statistics.mean(pts) gradeCell['bg'] = colorFromPoints(min(pts)) return", "then lets trim the landings not in the current month data2 = []", 
"text=text.upper(), loc='center', facecolor=blankcell) cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') #titlestr = 'JOW Greenie", "= count + 1 print('Number of rows kept: ', str(count)) data = data2", "grade0 if name == i['pilot']: if i['ServerDate'] not in uniqueDates: uniqueDates.append(i['ServerDate']) for i", "airframe data2 = data print('... size of data array: ' , str(len(data))) count", "elif g == 1: color=redcolor elif g == 2.0: color=browncolor elif g ==", "to ' + maxDate minRows = len(pilots) if minRows < 12: minRows =", "goldstar = '⭐' goldstar = '★' case3 = '•' case3= '◉' case2 =", "#078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718'] # redcolor = '#a00000' # browncolor = '#835C3B' # orangecolor", "' '+options['squadron'] count = 0 for col_idx in range(2,options['maxCols']+2): text = '' if", "gradeCell['score']: # print('what') return gradeCell def colorFromPoints(g): bluegraycolor = '#708286' glossgraycolor = '#5F615E'", "current month') for i in data: #print(i) idate = i['ServerDate'].split('/') imonth = int(idate[1])", "# print('what') return gradeCell def colorFromPoints(g): bluegraycolor = '#708286' glossgraycolor = '#5F615E' redcolor", "finalscore def plotSquadron(pilotRows, options): #print('PlotSquadron') maxLength = 0 for i in pilotRows: if", "print('Squadron: ', squadron) print('Ruleset: ', ruleset) lsoData = 'data.txt' updateDatabase(lsoData) with open('data.txt') as", "= ij['bg'] if not color: color = blankcell text = '' if '3'", "+ 1 color = blankcell text='' # add the remaining cells to the", "[] pilotRows = {} pilotDict = {} # get the rows as they", "ct-4000 return modification_time def calculateGradeCivilian(curList): gradeCell = {} gradeCell['score'] 
= -1 gradeCell['icon'] =", "len(pilots)+1 width, height = 100 / n_cols, 100.0 / n_rows #height = height/10", "if index != -1: data2.append(i) count = count + 1; #print('Keeping in squadron:", "client = gspread.authorize(creds) sheet = client.open('HypeMan_LSO_Grades').sheet1 list_of_hashes = sheet.get_all_records() with open('data.txt', 'w') as", "'#5F615E' redcolor = '#ED1B24' browncolor = '#835C3B' orangecolor = '#d17a00' yellowcolor = '#b6c700'", ">= 4: squadron = str(sys.argv[3]); print('Squadron: ', squadron) print('Ruleset: ', ruleset) lsoData =", "import os import time from datetime import datetime import matplotlib.pyplot as plt import", "in data: name = i['pilot'] #print('Name: ' , name) name = name.replace('-', '')", "through their grades and find their FIRST wire gradeCell = {} gradeCell['score'] =", "oauth2client.service_account import ServiceAccountCredentials #from datetime import datetime #print ('Number of arguments:', len(sys.argv), 'arguments.')", "count = count + 1 #print(' Calculate grade iteration: ', count) # skip", "cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) cell = tb.add_cell(0,1,2*width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonth =", "ct = time.time() try: modification_time = os.path.getmtime(path) #print(\"Last modification time since the epoch:\",", "if g == -1: color=bluegraycolor elif g == 0: color=blackcolor elif g ==", "g == 4.5: color = greencolor elif g == 5: color = greencolor", "== 'harrier': airframe = 'AV8BNA' elif str(sys.argv[1]) == 'goshawk': airframe = 'T-45' print('Aircraft:", "elif g == 5: color = greencolor elif g == 5.5: color =", "tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') 
cell.set_linewidth(0.5) # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"10\")) cell.set_edgecolor(edgecolor) col_idx = col_idx + 1", "('Number of arguments:', len(sys.argv), 'arguments.') #print ('Argument List:', str(sys.argv)) #if len(sys.argv)== 2: #", "12 #for p_idx in range(0,len(pilots)): for p_idx in range(0,minRows): row_idx = p_idx+1 rd", "= '#708286' glossgraycolor = '#5F615E' redcolor = '#ED1B24' browncolor = '#835C3B' orangecolor =", "g == 5: color = greencolor elif g == 5.5: color = bluegraycolor", "'2' in gradeCell['icon']: gradeCell['icon'] += '2' try: tmp = float(i['points']) if tmp >", "bluegraycolor else: color = blankcell return color def calculateGradeTailhooker(curList): # loop through their", "imonth == currentMonthSQ: data2.append(i) data = data2 for i in reversed(data): name =", ">= 3: ruleset = str(sys.argv[2]) if len(sys.argv) >= 4: squadron = str(sys.argv[3]); print('Squadron:", "= 'T-45' print('Aircraft: ', airframe) if len(sys.argv) >= 3: ruleset = str(sys.argv[2]) if", "text = goldstar elif '3' in g['icon']: text = case3 elif '5' in", "#print(\"Last modification time since the epoch:\", modification_time) except OSError: print(\"Path '%s' does not", "= '#ED1B24' browncolor = '#835C3B' orangecolor = '#d17a00' yellowcolor = '#b6c700' greencolor =", "'data.txt' updateDatabase(lsoData) with open('data.txt') as json_file: data = json.load(json_file) # go through and", "as outfile: json.dump(list_of_hashes, outfile) except: print('Exception thrown in updateFromGoogle') return print('Local HypeMan LSO", "g['icon']: text = shithot elif '3' in g['icon'] and '5' in g['icon']: text", "#cell.set_text_props(family='') # titlestr = 'JOW Greenie Board ' + minDate + ' to", "data2.append(i) count = count + 1; #print('Keeping in squadron: ' , name) #", "= name.find(squadron) if index != -1: data2.append(i) count = count + 1; #print('Keeping", 
"tb.add_cell(row_idx,1,2*width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.4\")) cell.set_edgecolor(edgecolor) col_idx = 2 for g in rd: color =", "os.path.getmtime(path) #print(\"Last modification time since the epoch:\", modification_time) except OSError: print(\"Path '%s' does", "i['pilot']: if i['ServerDate'] not in uniqueDates: uniqueDates.append(i['ServerDate']) for i in uniqueDates: #print(i) curList", "= [] count = 0 for i in curList: count = count +", "in range(0,minRows): row_idx = p_idx+1 rd = [] name = '' scoreText =", "name.lower() == 'eese': name = \"SippyCup\" cell = tb.add_cell(row_idx,0,8*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.5\")) cell.set_edgecolor(edgecolor)", "except OSError: print(\"Path '%s' does not exists or is inaccessible\" %path) return ct-4000", "count = 0 if airframe != '': data2 = [] print('Keeping only grades", "> pt: pt = tmp if tmp == 5 and not '5' in", "', airframe) if len(sys.argv) >= 3: ruleset = str(sys.argv[2]) if len(sys.argv) >= 4:", "case2 = '⊙' case2 = '○' #case2 = '○' #case2 = '∘' #unicorn='✈️'", "if not color: color = blankcell text = '' if '3' in ij['icon']:", "greencolor = '#0bab35' # try: # minDate = data[-1]['ServerDate'] # maxDate = data[0]['ServerDate']", "2.0: color=browncolor elif g == 2.5: color=bluecolor elif g == 3.0: color =", "'#01A2EA' blankcell='#FFFFFF' blackcolor = '#000000' color = 'blankcell' if g == -1: color=bluegraycolor", "# get the rows as they will appear in our Greenie Board #", "str(sys.argv)) #if len(sys.argv)== 2: # print('Argument Number 2: ', str(sys.argv[1])) def updateDatabase(path): if", "colorFromPoints(min(pts)) if i['case'] == 3: gradeCell['icon']+='3' return gradeCell if len(pts) == 0: 
gradeCell['score']", "0.0 for i in pilotRow: score = score + i['score'] finalscore = score/len(pilotRow)", "# else: # curDate = i['ServerDate'] # grade = calculateGrade(curList, grade0) # boardRow.append(grade)", "col_idx, width, height, text=text.upper(), loc='center', facecolor=blankcell) cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') #titlestr", "gradeCell['icon']: gradeCell['icon']+= '5' except: pt=0 gradeCell['bg'] = colorFromPoints(pt) gradeCell['score'] = pt # if", "gradeCell['bg'] = '#FFFFFF' pts = [] count = 0 for i in curList:", "#edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.5\")) cell.set_edgecolor(edgecolor) # name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,2*width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.4\"))", "'#d17a00' yellowcolor = '#b6c700' greencolor = '#0bab35' # try: # minDate = data[-1]['ServerDate']", "#avg = statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText = round(avg,1) if name.lower() == 'eese':", "g == 5.5: color = bluegraycolor else: color = blankcell return color def", "'F-14A-135-GR'] elif str(sys.argv[1]) == 'hornet': airframe = 'FA-18C_hornet' elif str(sys.argv[1]) == 'scooter': airframe", "in ij['icon']: text = case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') cell.set_linewidth(0.5) # cell.auto_set_font_size()", "\"SippyCup\" cell = tb.add_cell(row_idx,0,8*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) 
cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.5\")) cell.set_edgecolor(edgecolor) # name = pilots[p_idx]; cell", "i['case'] == 3 and not '3' in gradeCell['icon']: gradeCell['icon'] += '3' if i['case']", "#tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) # set defaults", "= '' if count < len(titlestr): text = titlestr[count] count = count +", "text = titlestr[count] count = count + 1 cell = tb.add_cell(0, col_idx, width,", "json import os import time from datetime import datetime import matplotlib.pyplot as plt", "as mcolors from matplotlib.table import Table from matplotlib.font_manager import FontProperties import numpy as", "[]; # curList.append(i) #print(boardRow) return boardRow def CalculateAverageScore(pilotRow): score = 0.0 for i", "elif g == 0: color=blackcolor elif g == 1: color=redcolor elif g ==", "g in rd: color = g['bg'] text = '' if '5.5' in g['icon']:", "airframe) if len(sys.argv) >= 3: ruleset = str(sys.argv[2]) if len(sys.argv) >= 4: squadron", "cell = tb.add_cell(row_idx,1,width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(size=\"6.0\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) col_idx = 2 for ij in", "text = '' if '3' in ij['icon']: text = case3 elif '2' in", "if i['airframe'] in airframe: data2.append(i) # print('Deleting airframe: ', i['airframe'], ' was looking", "options['maxRows']: minRows = options['maxRows'] #for p_idx in range(0,len(pilots)): for p_idx in range(0,minRows): row_idx", "defaults airframe = '' squadron = '' ruleset = 'best' #print('Length of argv:", "= statistics.mean(pts) gradeCell['bg'] = colorFromPoints(min(pts)) return gradeCell def calculateGrade(curList, ruleset): if ruleset ==", "12: 
minRows = 12 #for p_idx in range(0,len(pilots)): for p_idx in range(0,minRows): row_idx", "for i in reversed(data): name = i['pilot'] if name not in pilots: pilots.append(name)", "their FIRST wire gradeCell = {} gradeCell['score'] = 1 gradeCell['icon'] = '' gradeCell['bg']", "greencolor elif g == 4.5: color = greencolor elif g == 5: color", "yellowcolor = '#b6c700' greencolor = '#0bab35' bluecolor = '#01A2EA' blankcell='#FFFFFF' blackcolor = '#000000'", "just keep the original data data = data2 data2 = [] print('Skipping WOFDs')", "= i['ServerDate'].split('/') imonth = int(idate[1]) if imonth == currentMonth: data2.append(i) data = data2", "their grades and find their FIRST wire gradeCell = {} gradeCell['score'] = 1", "= len(pilots) if minRows < 12: minRows = 12 #for p_idx in range(0,len(pilots)):", "i['airframe'] #if i['airframe'] == airframe: if i['airframe'] in airframe: data2.append(i) # print('Deleting airframe:", "'+options['squadron'] count = 0 for col_idx in range(2,options['maxCols']+2): text = '' if count", "text = case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"14\")) cell.set_edgecolor(edgecolor) col_idx", "4.0: color = greencolor elif g == 4.5: color = greencolor elif g", "= time.time() try: modification_time = os.path.getmtime(path) #print(\"Last modification time since the epoch:\", modification_time)", "= tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) # #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([])", "grade #grade0={}; grade0['color']='white'; grade0['score']=0.0; grade0['symbol']='x'; grade0['grade']='--' # if squadron is empty then lets", "data2.append(i) 
data = data2 if squadron != '': currentMonthSQ = datetime.now().month print('skipping landings", "cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonth = datetime.now().month titlestr = ' JOINT OPS", "= name.lower() index = name.find(squadron) if index != -1: data2.append(i) count = count", "'turkey': airframe = ['F-14B', 'F-14A-135-GR'] elif str(sys.argv[1]) == 'hornet': airframe = 'FA-18C_hornet' elif", "= case3 elif '5' in g['icon']: text = anchor elif '2' in g['icon']:", "airframe: data2.append(i) # print('Deleting airframe: ', i['airframe'], ' was looking for: ' ,", "def plotSquadron(pilotRows, options): #print('PlotSquadron') maxLength = 0 for i in pilotRows: if len(i)", "100.0 / n_rows #height = height/10 shithot ='🎖️' anchor='⚓' goldstar = '⭐' goldstar", "#except: # minDate ='' # maxDate = '' textcolor = '#000000' edgecolor =", "= 0 if airframe != '': data2 = [] print('Keeping only grades for", "print(\"Path '%s' does not exists or is inaccessible\" %path) return ct-4000 return modification_time", "squadron was empty just keep the original data data = data2 data2 =", "modification time since the epoch:\", modification_time) except OSError: print(\"Path '%s' does not exists", "shithot ='🎖️' anchor='⚓' goldstar = '⭐' goldstar = '★' case3 = '•' case3=", "str(datetime.now().year) print(titlestr) count = 0 for col_idx in range(2,maxLength+2): text = '' if", "= fig.add_subplot(1,1,1) frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0, 0, 1,", "', airframe) for i in data: # if i['airframe'] #if i['airframe'] == airframe:", "color = greencolor elif g == 4.5: color = greencolor elif g ==", "data2 = [] if squadron == '': currentMonth = datetime.now().month print('skipping landings not", "len(i) if maxLength < 17: maxLength = 17 fig = 
plt.figure(dpi=150) ax =", "name.replace(']', '') name = name.replace('|', '') name = name.replace('\\\\', '') name = name.replace('/',", "= '#708090' cell = tb.add_cell(0,0,8*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) cell = tb.add_cell(0,1,2*width,height,text='',loc='center',facecolor=blankcell)", "cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_edgecolor(edgecolor) #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w')", "gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF' pt = float(-1.0) for i in curList:", "haveDate: # curDate = i['ServerDate'] # haveDate = True # # if curDate", "= 'AV8BNA' elif str(sys.argv[1]) == 'goshawk': airframe = 'T-45' print('Aircraft: ', airframe) if", "'': currentMonthSQ = datetime.now().month print('skipping landings not in current month') for i in", "data2 = [] print('Keeping only grades for airframe: ', airframe) for i in", "!= '': currentMonthSQ = datetime.now().month print('skipping landings not in current month') for i", "'' textcolor = '#FFFFF0' edgecolor = '#708090' cell = tb.add_cell(0,0,8*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8))", "# avg = statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText = round(avg,1) cell = tb.add_cell(row_idx,0,10*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue')", "options): #print('PlotSquadron') maxLength = 0 for i in pilotRows: if len(i) > maxLength:", "i['finalscore'] pts.append(i['points']) gradeCell['bg'] = colorFromPoints(min(pts)) if 
i['case'] == 3: gradeCell['icon']+='3' return gradeCell if", "name.replace('@', '') name = name.lower() index = name.find(squadron) if index != -1: data2.append(i)", "name = name.replace('@', '') name = name.lower() index = name.find(squadron) if index !=", "if i['ServerDate'] not in uniqueDates: uniqueDates.append(i['ServerDate']) for i in uniqueDates: #print(i) curList =", "OPS WING' #+ str(currentMonth) + '/' + str(datetime.now().year) print(titlestr) count = 0 for", "greencolor = '#0bab35' bluecolor = '#01A2EA' blankcell='#FFFFFF' blackcolor = '#000000' color = 'blankcell'", "pad_inches=0) # set defaults airframe = '' squadron = '' ruleset = 'best'", "maxDate = '' textcolor = '#FFFFF0' edgecolor = '#708090' cell = tb.add_cell(0,0,8*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none'", "minRows < options['maxRows']: minRows = options['maxRows'] #for p_idx in range(0,len(pilots)): for p_idx in", "in gradeCell['icon']: gradeCell['icon']+= '5' except: pt=0 gradeCell['bg'] = colorFromPoints(pt) gradeCell['score'] = pt #", "= 'JOW Greenie Board ' + minDate + ' to ' + maxDate", "= tb.add_cell(row_idx,0,10*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) # name = pilots[p_idx]; cell =", "calculatePilotRow(data, name, ruleset): #print(name) boardRow = []; uniqueDates = [] for i in", "#print(' Calculate grade iteration: ', count) # skip WOFDS if 'WOFD' in i['grade']:", "statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText = round(avg,1) if name.lower() == 'eese': name =", "our Greenie Board # set the default grade #grade0={}; grade0['color']='white'; grade0['score']=0.0; grade0['symbol']='x'; grade0['grade']='--'", "was empty just keep the original data data = data2 data2 = []", "# maxDate = '' textcolor = '#FFFFF0' edgecolor 
= '#708090' cell = tb.add_cell(0,0,8*width,height,text='Callsign',loc='center',facecolor=blankcell)", "'5' in g['icon']: text = goldstar elif '3' in g['icon']: text = case3", "redcolor = '#a00000' # browncolor = '#835C3B' # orangecolor = '#d17a00' # yellowcolor", "print('Skipping WOFDs') for i in data: if not'WOFD' in i['grade']: data2.append(i) data =", "or time.time() - getModificationTimeSeconds(path) > 1: print('Updating from Google.') updateFromGoogle() else: print('Less than", "add the remaining cells to the end for f in range(col_idx,options['maxCols']+2): cell =", "data2 print('Number remaining: ', str(len(data))) pilots = [] pilotRows = {} pilotDict =", "airframe = '' squadron = '' ruleset = 'best' #print('Length of argv: '", "', str(len(data))) pilots = [] pilotRows = {} pilotDict = {} # get", "squadron != '': data2 = [] print('Searching for squadron: ' , squadron) for", "= data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] # except: # minDate ='' # maxDate", "for j in data: if name == j['pilot'] and j['ServerDate'] == i: curList.append(j)", "pilotDict[name]=pilotRow options = {} if squadron == '': plotDefaultBoard(pilotRows, options) else: options['airframe'] =", "Board ' + minDate + ' to ' + maxDate minRows = len(pilots)", "minDate + ' to ' + maxDate minRows = len(pilots) if minRows <", "'') name = name.replace('_', '') name = name.replace('[', '') name = name.replace(']', '')", "dpi=250) ax = fig.add_subplot(1,1,1) frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0,", "+ str(datetime.now().year) print(titlestr) count = 0 for col_idx in range(2,maxLength+2): text = ''", "# minDate ='' # maxDate = '' textcolor = '#000000' edgecolor = '#708090'", "lets trim the landings not in the current month data2 = [] if", "#case2 = '∘' #unicorn='✈️' blankcell='#1A392A' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 
#colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718', '#708286','#5F615E'] redcolor = '#a00000'", "col_idx + 1 color = blankcell text='' # add the remaining cells to", "#from datetime import datetime #print ('Number of arguments:', len(sys.argv), 'arguments.') #print ('Argument List:',", "= [] name = '' scoreText = '' if p_idx < len(pilots): name", "scope) client = gspread.authorize(creds) sheet = client.open('HypeMan_LSO_Grades').sheet1 list_of_hashes = sheet.get_all_records() with open('data.txt', 'w')", "= [] pilotRows = {} pilotDict = {} # get the rows as", "frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0, 0, 1, 1]) #tb.scale(0.25, 1) tb.auto_set_font_size(False) n_cols =", "'#0bab35' # try: # minDate = data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] # except:", "not exists or is inaccessible\" %path) return ct-4000 return modification_time def calculateGradeCivilian(curList): gradeCell", "cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"14\")) cell.set_edgecolor(edgecolor) col_idx = col_idx + 1 color = blankcell text='' # add", "maxLength+2 n_rows = len(pilots)+1 width, height = 100 / n_cols, 100.0 / n_rows", "= datetime.now().month titlestr = ' '+options['squadron'] count = 0 for col_idx in range(2,options['maxCols']+2):", "= [] print('Skipping WOFDs') for i in data: if not'WOFD' in i['grade']: data2.append(i)", "appear in our Greenie Board # set the default grade #grade0={}; grade0['color']='white'; grade0['score']=0.0;", "if i['airframe'] #if i['airframe'] == airframe: if i['airframe'] in airframe: data2.append(i) # print('Deleting", "#if i['airframe'] == airframe: if i['airframe'] in airframe: data2.append(i) # print('Deleting airframe: ',", "for i in reversed(data): #grade = grade0 if name == i['pilot']: if i['ServerDate']", "# if curDate == i['ServerDate']: # curList.append(i) # # else: # curDate =", "was 
looking for: ' , airframe) # data.remove(i) count = count + 1", "ax = fig.add_subplot(1,1,1) frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0, 0,", "'' gradeCell['bg'] = '#FFFFFF' pt = float(-1.0) for i in curList: if i['case']", "'AV8BNA' elif str(sys.argv[1]) == 'goshawk': airframe = 'T-45' print('Aircraft: ', airframe) if len(sys.argv)", "specified airframe data2 = data print('... size of data array: ' , str(len(data)))", "else: #print('not empty') if not i['finalscore']: gradeCell['score'] = i['points'] else: gradeCell['score'] = i['finalscore']", "data = data2 print('size of data array: ' , str(len(data))) count = 0", "print('Number remaining: ', str(len(data))) pilots = [] pilotRows = {} pilotDict = {}", "#print('Length of argv: ' , len(sys.argv)); if len(sys.argv) >= 2: if str(sys.argv[1]) ==", "in g['icon']: text = goldstar elif '3' in g['icon']: text = case3 elif", "colorFromPoints(pt) gradeCell['score'] = pt # if not gradeCell['score']: # print('what') return gradeCell def", ", name) name = name.replace('-', '') name = name.replace('_', '') name = name.replace('[',", "= 1 gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF' pts = [] count =", "cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.4\")) cell.set_edgecolor(edgecolor) col_idx = 2 for g in rd: color = g['bg']", "not color: color = blankcell text = '' if '3' in ij['icon']: text", "if squadron != '': currentMonthSQ = datetime.now().month print('skipping landings not in current month')", "open('data.txt', 'w') as outfile: json.dump(list_of_hashes, outfile) except: print('Exception thrown in updateFromGoogle') return print('Local", "else: color = blankcell return color def calculateGradeTailhooker(curList): # loop through their grades", "fig = plt.figure(figsize=(6, 3), dpi=250) ax = fig.add_subplot(1,1,1) frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) 
frame1.axes.get_yaxis().set_ticks([])", "1]) #tb.scale(0.25, 1) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows = len(pilots)+1 width, height =", "data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] #except: # minDate ='' # maxDate = ''", "bluegraycolor = '#708286' glossgraycolor = '#5F615E' browncolor = '#835C3B' orangecolor = '#d17a00' yellowcolor", "pilotRow = calculatePilotRow(data, name, ruleset) #print(name,' score: ' , pilotRow) pilotRows[name] = (pilotRow)", "in g['icon'] and '5' in g['icon']: text = goldstar elif '3' in g['icon']:", "titlestr = 'JOW Greenie Board ' + minDate + ' to ' +", "wire gradeCell = {} gradeCell['score'] = 1 gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF'", "plotDefaultBoard(pilotRows, options): maxLength = 0 for i in pilotRows: if len(i) > maxLength:", "loc='center', facecolor=blankcell) cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') #titlestr = 'JOW Greenie Board", "= [] for i in reversed(data): #grade = grade0 if name == i['pilot']:", "data: # if i['airframe'] #if i['airframe'] == airframe: if i['airframe'] in airframe: data2.append(i)", "# go through and keep only a specified airframe data2 = data print('...", "'/' + str(datetime.now().year) print(titlestr) count = 0 for col_idx in range(2,maxLength+2): text =", "== 'turkey': airframe = ['F-14B', 'F-14A-135-GR'] elif str(sys.argv[1]) == 'hornet': airframe = 'FA-18C_hornet'", "', ruleset) lsoData = 'data.txt' updateDatabase(lsoData) with open('data.txt') as json_file: data = json.load(json_file)", "# name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(size=\"6.0\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) col_idx =", "' , 
pilotRow) pilotRows[name] = (pilotRow) # pilotDict[name]=pilotRow options = {} if squadron", "# curDate = i['ServerDate'] # haveDate = True # # if curDate ==", "0: color=blackcolor elif g == 1: color=redcolor elif g == 2.0: color=browncolor elif", "f in range(col_idx,maxLength+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_edgecolor(edgecolor) #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False)", "= 'best' #print('Length of argv: ' , len(sys.argv)); if len(sys.argv) >= 2: if", "cell.set_edgecolor(edgecolor) col_idx = 2 for g in rd: color = g['bg'] text =", "width, height, text=text.upper(), loc='center', facecolor=blankcell) cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') #titlestr =", "'∘' #unicorn='✈️' blankcell='#1A392A' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718', '#708286','#5F615E'] redcolor = '#a00000' bluegraycolor =", "grade = calculateGrade(curList, grade0) # boardRow.append(grade) # curList = []; # curList.append(i) #print(boardRow)", "currentMonth: data2.append(i) data = data2 if squadron != '': currentMonthSQ = datetime.now().month print('skipping", "cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.5\")) cell.set_edgecolor(edgecolor) # name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,2*width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.4\")) cell.set_edgecolor(edgecolor)", "name = name.replace('|', '') name = 
name.replace('\\\\', '') name = name.replace('/', '') name", "landings not in current month') for i in data: #print(i) idate = i['ServerDate'].split('/')", "len(sys.argv)); if len(sys.argv) >= 2: if str(sys.argv[1]) == 'turkey': airframe = ['F-14B', 'F-14A-135-GR']", "in range(col_idx,maxLength+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_edgecolor(edgecolor) #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([])", "CalculateAverageScore(rd) scoreText = round(avg,1) if name.lower() == 'eese': name = \"SippyCup\" cell =", "round(avg,1) cell = tb.add_cell(row_idx,0,10*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) # name = pilots[p_idx];", "pilots.append(name) pilotRow = calculatePilotRow(data, name, ruleset) #print(name,' score: ' , pilotRow) pilotRows[name] =", "# curDate = i['ServerDate'] # grade = calculateGrade(curList, grade0) # boardRow.append(grade) # curList", "calculateGrade(curList, grade0) # boardRow.append(grade) # curList = []; # curList.append(i) #print(boardRow) return boardRow", "from matplotlib.font_manager import FontProperties import numpy as np import statistics import sys, getopt", "curList = []; for j in data: if name == j['pilot'] and j['ServerDate']", "= 0 for i in curList: count = count + 1 #print(' Calculate", "= int(idate[1]) if imonth == currentMonthSQ: data2.append(i) data = data2 for i in", "Greenie Board ' + minDate + ' to ' + maxDate minRows =", "in pilotRows: if len(i) > maxLength: maxLength = len(i) if maxLength < options['maxRows']:", "remaining cells to the end for f in range(col_idx,maxLength+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none'", 
"'': plotDefaultBoard(pilotRows, options) else: options['airframe'] = airframe options['squadron'] = squadron options['ruleset'] = ruleset", "time since the epoch:\", modification_time) except OSError: print(\"Path '%s' does not exists or", "'' ruleset = 'best' #print('Length of argv: ' , len(sys.argv)); if len(sys.argv) >=", "set the default grade #grade0={}; grade0['color']='white'; grade0['score']=0.0; grade0['symbol']='x'; grade0['grade']='--' # if squadron is", "not haveDate: # curDate = i['ServerDate'] # haveDate = True # # if", "if name == j['pilot'] and j['ServerDate'] == i: curList.append(j) ithPilotGrade = calculateGrade(curList,ruleset) boardRow.append(ithPilotGrade)", "== 0: gradeCell['score'] = 1 pts.append(1) else: gradeCell['score'] = statistics.mean(pts) gradeCell['bg'] = colorFromPoints(min(pts))", "open('data.txt') as json_file: data = json.load(json_file) # go through and keep only a", "orangecolor = '#d17a00' yellowcolor = '#b6c700' greencolor = '#0bab35' bluecolor = '#01A2EA' blankcell='#FFFFFF'", "with open('data.txt') as json_file: data = json.load(json_file) # go through and keep only", "#case2 = '○' #case2 = '∘' #unicorn='✈️' blankcell='#FFFFFF' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718'] #", "as json_file: data = json.load(json_file) # go through and keep only a specified", "i in curList: if i['case'] == 3 and not '3' in gradeCell['icon']: gradeCell['icon']", "avg = statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText = round(avg,1) cell = tb.add_cell(row_idx,0,10*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none'", "= tb.add_cell(0,0,10*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) 
cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) #cell.set_fontsize(24) cell = tb.add_cell(0,1,width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor)", "width, height, text=text, loc='center', facecolor=blankcell) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') # titlestr =", "[]; for j in data: if name == j['pilot'] and j['ServerDate'] == i:", "color = ij['bg'] if not color: color = blankcell text = '' if", "#edgecolor='none' cell.get_text().set_color('#333412') cell.set_linewidth(0.5) # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"10\")) cell.set_edgecolor(edgecolor) col_idx = col_idx + 1 color", "i['ServerDate'].split('/') imonth = int(idate[1]) if imonth == currentMonthSQ: data2.append(i) data = data2 for", "i['finalscore']: gradeCell['score'] = i['points'] else: gradeCell['score'] = i['finalscore'] pts.append(i['points']) gradeCell['bg'] = colorFromPoints(min(pts)) if", "datetime.now().month titlestr = ' JOINT OPS WING' #+ str(currentMonth) + '/' + str(datetime.now().year)", "scoreText = round(avg,1) cell = tb.add_cell(row_idx,0,10*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) # name", "name.replace('|', '') name = name.replace('\\\\', '') name = name.replace('/', '') name = name.replace('@',", ", airframe) # data.remove(i) count = count + 1 print('Number of rows kept:", "= '#0bab35' # bluecolor = '#01A2EA' #try: # minDate = data[-1]['ServerDate'] # maxDate", "minRows = len(pilots) if minRows < options['maxRows']: 
minRows = options['maxRows'] #for p_idx in", "the rows as they will appear in our Greenie Board # set the", "the remaining cells to the end for f in range(col_idx,options['maxCols']+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color)", "cell = tb.add_cell(row_idx,0,8*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.5\")) cell.set_edgecolor(edgecolor) # name = pilots[p_idx]; cell =", "as they will appear in our Greenie Board # set the default grade", "redcolor = '#ED1B24' browncolor = '#835C3B' orangecolor = '#d17a00' yellowcolor = '#b6c700' greencolor", "else: print('Less than one hour since last refresh, skipping pull from google.') def", "100 / n_cols, 100.0 / n_rows #height = height/10 shithot ='🎖️' anchor='⚓' goldstar", "'3' if i['case'] == 2 and not '2' in gradeCell['icon']: gradeCell['icon'] += '2'", "# if the squadron was empty just keep the original data data =", "int(idate[1]) if imonth == currentMonthSQ: data2.append(i) data = data2 for i in reversed(data):", "= data print('... 
size of data array: ' , str(len(data))) count = 0", "case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"14\")) cell.set_edgecolor(edgecolor) col_idx = col_idx", "cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) #cell.set_fontsize(24) cell = tb.add_cell(0,1,width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24)", "pilotRows[name] = (pilotRow) # pilotDict[name]=pilotRow options = {} if squadron == '': plotDefaultBoard(pilotRows,", "#if len(sys.argv)== 2: # print('Argument Number 2: ', str(sys.argv[1])) def updateDatabase(path): if not", "cell = tb.add_cell(row_idx,0,10*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) # name = pilots[p_idx]; cell", "cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') # titlestr = 'JOW Greenie Board ' + minDate +", "the squadron was empty just keep the original data data = data2 data2", "in data: if name == j['pilot'] and j['ServerDate'] == i: curList.append(j) ithPilotGrade =", "curDate = i['ServerDate'] # grade = calculateGrade(curList, grade0) # boardRow.append(grade) # curList =", "+ 1 cell = tb.add_cell(0, col_idx, width, height, text=text.upper(), loc='center', facecolor=blankcell) cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor)", "case2 = '○' #case2 = '○' #case2 = '∘' #unicorn='✈️' blankcell='#FFFFFF' 
#colors=['red','orange','orange','yellow','lightgreen'] #078a21", "edgecolor = '#708090' cell = tb.add_cell(0,0,8*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) cell =", "mcolors from matplotlib.table import Table from matplotlib.font_manager import FontProperties import numpy as np", "= height/10 shithot ='🎖️' anchor='⚓' goldstar = '⭐' goldstar = '★' case3 =", "# print('Argument Number 2: ', str(sys.argv[1])) def updateDatabase(path): if not os.path.isfile(path) or time.time()", "currentMonthSQ = datetime.now().month print('skipping landings not in current month') for i in data:", "i['pilot'] if name not in pilots: pilots.append(name) pilotRow = calculatePilotRow(data, name, ruleset) #print(name,'", "HypeMan LSO grade database updated from Google Sheets.') def getModificationTimeSeconds(path): ct = time.time()", "not'WOFD' in i['grade']: data2.append(i) data = data2 print('Number remaining: ', str(len(data))) pilots =", "== 'eese': name = \"SippyCup\" cell = tb.add_cell(row_idx,0,8*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.5\")) cell.set_edgecolor(edgecolor) #", "pt = float(-1.0) for i in curList: if i['case'] == 3 and not", "name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(size=\"6.0\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) col_idx = 2", "import datetime #print ('Number of arguments:', len(sys.argv), 'arguments.') #print ('Argument List:', str(sys.argv)) #if", "for f in range(col_idx,options['maxCols']+2): cell = 
tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) # #tb.set_fontsize(7) ax.add_table(tb)", "'' if count < len(titlestr): text = titlestr[count] count = count + 1", "browncolor = '#835C3B' # orangecolor = '#d17a00' # yellowcolor = '#b6c700' # greencolor", "# curList.append(i) #print(boardRow) return boardRow def CalculateAverageScore(pilotRow): score = 0.0 for i in", "plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) def plotDefaultBoard(pilotRows, options): maxLength = 0 for", "frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0, 0, 1, 1]) #tb.scale(0.25,", "data: #print(i) idate = i['ServerDate'].split('/') imonth = int(idate[1]) if imonth == currentMonthSQ: data2.append(i)", "the end for f in range(col_idx,options['maxCols']+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) #", "score + i['score'] finalscore = score/len(pilotRow) #print(finalscore) return finalscore def plotSquadron(pilotRows, options): #print('PlotSquadron')", "(pilotRow) # pilotDict[name]=pilotRow options = {} if squadron == '': plotDefaultBoard(pilotRows, options) else:", "= [] print('Keeping only grades for airframe: ', airframe) for i in data:", "import datetime import matplotlib.pyplot as plt import matplotlib.colors as mcolors from matplotlib.table import", "than one hour since last refresh, skipping pull from google.') def updateFromGoogle(): try:", "name == j['pilot'] and j['ServerDate'] == i: curList.append(j) ithPilotGrade = calculateGrade(curList,ruleset) boardRow.append(ithPilotGrade) #", "month') for i in data: #print(i) idate = i['ServerDate'].split('/') imonth = int(idate[1]) if", "is 
inaccessible\" %path) return ct-4000 return modification_time def calculateGradeCivilian(curList): gradeCell = {} gradeCell['score']", "# loop through their grades and find their FIRST wire gradeCell = {}", "[] if squadron == '': currentMonth = datetime.now().month print('skipping landings not in current", "['F-14B', 'F-14A-135-GR'] elif str(sys.argv[1]) == 'hornet': airframe = 'FA-18C_hornet' elif str(sys.argv[1]) == 'scooter':", "# curList.append(i) # # else: # curDate = i['ServerDate'] # grade = calculateGrade(curList,", "< 12: minRows = 12 #for p_idx in range(0,len(pilots)): for p_idx in range(0,minRows):", "in the current month data2 = [] if squadron == '': currentMonth =", "# if squadron is empty then lets trim the landings not in the", "'#01A2EA' #try: # minDate = data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] #except: # minDate", "', squadron) print('Ruleset: ', ruleset) lsoData = 'data.txt' updateDatabase(lsoData) with open('data.txt') as json_file:", "= '' if p_idx < len(pilots): name = pilots[p_idx] rd = pilotRows[name] #avg", "for col_idx in range(2,maxLength+2): text = '' if count < len(titlestr): text =", "elif g == 3.0: color = yellowcolor elif g == 4.0: color =", "text = case3 elif '2' in ij['icon']: text = case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color)", "blankcell return color def calculateGradeTailhooker(curList): # loop through their grades and find their", "1, 1]) #tb.scale(0.25, 1) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows = len(pilots)+1 width, height", "blackcolor = '#000000' color = 'blankcell' if g == -1: color=bluegraycolor elif g", "score = 0.0 for i in pilotRow: score = score + i['score'] finalscore", "blankcell='#FFFFFF' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718'] # redcolor = '#a00000' # 
browncolor = '#835C3B'", "#grade = grade0 if name == i['pilot']: if i['ServerDate'] not in uniqueDates: uniqueDates.append(i['ServerDate'])", "greencolor elif g == 5.5: color = bluegraycolor else: color = blankcell return", "+ maxDate minRows = len(pilots) if minRows < 12: minRows = 12 #for", "# if i['airframe'] #if i['airframe'] == airframe: if i['airframe'] in airframe: data2.append(i) #", "= tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"14\")) cell.set_edgecolor(edgecolor) col_idx = col_idx + 1", "= count + 1; #print('Keeping in squadron: ' , name) # name =", "rows kept: ', str(count)) data = data2 print('size of data array: ' ,", "count = 0 if squadron != '': data2 = [] print('Searching for squadron:", "datetime import datetime import matplotlib.pyplot as plt import matplotlib.colors as mcolors from matplotlib.table", "cell.set_edgecolor(edgecolor) col_idx = col_idx + 1 color = blankcell text='' # add the", "if not'WOFD' in i['grade']: data2.append(i) data = data2 print('Number remaining: ', str(len(data))) pilots", "/ n_cols, 100.0 / n_rows shithot ='🎖️' anchor='⚓' goldstar = '⭐' goldstar =", "tmp > pt: pt = tmp if tmp == 5 and not '5'", "data array: ' , str(len(data))) count = 0 if squadron != '': data2", "from Google.') updateFromGoogle() else: print('Less than one hour since last refresh, skipping pull", "' + maxDate minRows = len(pilots) if minRows < 12: minRows = 12", "gradeCell = {} gradeCell['score'] = 1 gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF' pts", "json.dump(list_of_hashes, outfile) except: print('Exception thrown in updateFromGoogle') return print('Local HypeMan LSO grade database", "if not i['wire']: #print('Empty.') pts.append(i['points']) else: #print('not empty') if not i['finalscore']: gradeCell['score'] =", "squadron == '': currentMonth = 
datetime.now().month print('skipping landings not in current month') for", "0 if squadron != '': data2 = [] print('Searching for squadron: ' ,", "ruleset = str(sys.argv[2]) if len(sys.argv) >= 4: squadron = str(sys.argv[3]); print('Squadron: ', squadron)", "g == 1: color=redcolor elif g == 2.0: color=browncolor elif g == 2.5:", "#colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718'] # redcolor = '#a00000' # browncolor = '#835C3B' # orangecolor =", "cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) col_idx = 2 for ij in rd: color = ij['bg'] if", "col_idx, width, height, text=text, loc='center', facecolor=blankcell) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') # titlestr", "in data: #print(i) idate = i['ServerDate'].split('/') imonth = int(idate[1]) if imonth == currentMonth:", "imonth == currentMonth: data2.append(i) data = data2 if squadron != '': currentMonthSQ =", "float(i['points']) if tmp > pt: pt = tmp if tmp == 5 and", "= data[0]['ServerDate'] #except: # minDate ='' # maxDate = '' textcolor = '#000000'", "cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"14\")) cell.set_edgecolor(edgecolor) col_idx = col_idx +", "= '' scoreText = '' if p_idx < len(pilots): name = pilots[p_idx] rd", "Calculate grade iteration: ', count) # skip WOFDS if 'WOFD' in i['grade']: continue", "gradeCell['score'] = 1 pts.append(1) else: gradeCell['score'] = statistics.mean(pts) gradeCell['bg'] = colorFromPoints(min(pts)) return gradeCell", "< 17: maxLength = 17 fig = plt.figure(dpi=150) ax = fig.add_subplot(1,1,1) frame1 =", "'#ED1B24' 
browncolor = '#835C3B' orangecolor = '#d17a00' yellowcolor = '#b6c700' greencolor = '#0bab35'", "name = name.replace(']', '') name = name.replace('|', '') name = name.replace('\\\\', '') name", "= True # # if curDate == i['ServerDate']: # curList.append(i) # # else:", "= anchor elif '2' in g['icon']: text = case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none'", "'#000000' color = 'blankcell' if g == -1: color=bluegraycolor elif g == 0:", "'#000000' edgecolor = '#708090' cell = tb.add_cell(0,0,10*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) #cell.set_fontsize(24)", "getModificationTimeSeconds(path) > 1: print('Updating from Google.') updateFromGoogle() else: print('Less than one hour since", "'WOFD' in i['grade']: continue if not i['wire']: #print('Empty.') pts.append(i['points']) else: #print('not empty') if", "#try: # minDate = data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] #except: # minDate =''", "'eese': name = \"SippyCup\" cell = tb.add_cell(row_idx,0,8*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.5\")) cell.set_edgecolor(edgecolor) # name", "ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) def plotDefaultBoard(pilotRows, options): maxLength = 0 for i in", "frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0, 0, 1, 1]) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows", "== 'hornet': airframe = 'FA-18C_hornet' elif str(sys.argv[1]) == 'scooter': airframe = 'A-4E-C' elif", "'2' try: tmp = float(i['points']) if tmp > pt: pt = tmp if", 
"titlestr[count] count = count + 1 cell = tb.add_cell(0, col_idx, width, height, text=text,", "Google.') updateFromGoogle() else: print('Less than one hour since last refresh, skipping pull from", "does not exists or is inaccessible\" %path) return ct-4000 return modification_time def calculateGradeCivilian(curList):", "= -1 gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF' pt = float(-1.0) for i", "print('Keeping only grades for airframe: ', airframe) for i in data: # if", "squadron) for i in data: name = i['pilot'] #print('Name: ' , name) name", "+ ' to ' + maxDate minRows = len(pilots) if minRows < options['maxRows']:", "if tmp == 5 and not '5' in gradeCell['icon']: gradeCell['icon']+= '5' except: pt=0", "= '#a00000' bluegraycolor = '#708286' glossgraycolor = '#5F615E' browncolor = '#835C3B' orangecolor =", "#height = height/10 shithot ='🎖️' anchor='⚓' goldstar = '⭐' goldstar = '★' case3", "return calculateGradeCivilian(curList) if ruleset == 'first': return calculateGradeTailhooker(curList) def calculatePilotRow(data, name, ruleset): #print(name)", "tb.add_cell(row_idx,1,width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(size=\"6.0\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) col_idx = 2 for ij in rd: color", "yellowcolor = '#b6c700' greencolor = '#0bab35' # try: # minDate = data[-1]['ServerDate'] #", "{} if squadron == '': plotDefaultBoard(pilotRows, options) else: options['airframe'] = airframe options['squadron'] =", "'#835C3B' orangecolor = '#d17a00' yellowcolor = '#b6c700' greencolor = '#0bab35' bluecolor = '#01A2EA'", "'goshawk': airframe = 'T-45' print('Aircraft: ', airframe) if len(sys.argv) >= 3: ruleset =", "col_idx in range(2,maxLength+2): text = '' if count < len(titlestr): text = titlestr[count]", "import json import os import time from datetime import datetime import matplotlib.pyplot as", "score = score + i['score'] finalscore = 
score/len(pilotRow) #print(finalscore) return finalscore def plotSquadron(pilotRows,", "# minDate = data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] # except: # minDate =''", "i['ServerDate']: # curList.append(i) # # else: # curDate = i['ServerDate'] # grade =", "= '#000000' color = 'blankcell' if g == -1: color=bluegraycolor elif g ==", "not os.path.isfile(path) or time.time() - getModificationTimeSeconds(path) > 1: print('Updating from Google.') updateFromGoogle() else:", "hour since last refresh, skipping pull from google.') def updateFromGoogle(): try: scope =", "name.lower() index = name.find(squadron) if index != -1: data2.append(i) count = count +", "name = i['pilot'] #print('Name: ' , name) name = name.replace('-', '') name =", "imonth = int(idate[1]) if imonth == currentMonth: data2.append(i) data = data2 if squadron", "= i['finalscore'] pts.append(i['points']) gradeCell['bg'] = colorFromPoints(min(pts)) if i['case'] == 3: gradeCell['icon']+='3' return gradeCell", "name = name.replace('-', '') name = name.replace('_', '') name = name.replace('[', '') name", "if squadron != '': data2 = [] print('Searching for squadron: ' , squadron)", "# titlestr = 'JOW Greenie Board ' + minDate + ' to '", "airframe = 'T-45' print('Aircraft: ', airframe) if len(sys.argv) >= 3: ruleset = str(sys.argv[2])", "glossgraycolor = '#5F615E' redcolor = '#ED1B24' browncolor = '#835C3B' orangecolor = '#d17a00' yellowcolor", "'T-45' print('Aircraft: ', airframe) if len(sys.argv) >= 3: ruleset = str(sys.argv[2]) if len(sys.argv)", "== 4.0: color = greencolor elif g == 4.5: color = greencolor elif", "maxLength = 0 for i in pilotRows: if len(i) > maxLength: maxLength =", "i in data: #print(i) idate = i['ServerDate'].split('/') imonth = int(idate[1]) if imonth ==", "= len(pilots)+1 width, height = 100 / n_cols, 100.0 / n_rows #height =", "#cell.set_fontsize(24) currentMonth = datetime.now().month titlestr = ' JOINT OPS WING' #+ str(currentMonth) +", "pilots[p_idx]; cell = 
tb.add_cell(row_idx,1,width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(size=\"6.0\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) col_idx = 2 for ij", "= '#FFFFFF' pt = float(-1.0) for i in curList: if i['case'] == 3", "squadron = str(sys.argv[3]); print('Squadron: ', squadron) print('Ruleset: ', ruleset) lsoData = 'data.txt' updateDatabase(lsoData)", "['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive'] creds = ServiceAccountCredentials.from_json_keyfile_name('HypeManLSO-358d4493fc1d.json', scope) client = gspread.authorize(creds) sheet = client.open('HypeMan_LSO_Grades').sheet1 list_of_hashes =", "elif g == 2.5: color=bluecolor elif g == 3.0: color = yellowcolor elif", "color = blankcell text='' # add the remaining cells to the end for", "count = 0 for col_idx in range(2,maxLength+2): text = '' if count <", "if imonth == currentMonth: data2.append(i) data = data2 if squadron != '': currentMonthSQ", "for airframe: ', airframe) for i in data: # if i['airframe'] #if i['airframe']", "json_file: data = json.load(json_file) # go through and keep only a specified airframe", "name.replace('_', '') name = name.replace('[', '') name = name.replace(']', '') name = name.replace('|',", "= i['ServerDate'] # haveDate = True # # if curDate == i['ServerDate']: #", "tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows = len(pilots)+1 width, height = 100 / n_cols,", "gradeCell['bg'] = colorFromPoints(min(pts)) return gradeCell def calculateGrade(curList, ruleset): if ruleset == 'best': return", "uniqueDates: #print(i) curList = []; for j in data: if name == j['pilot']", "# greencolor = '#0bab35' # bluecolor = '#01A2EA' #try: # minDate = data[-1]['ServerDate']", "gradeCell['bg'] = colorFromPoints(min(pts)) if i['case'] == 3: gradeCell['icon']+='3' return gradeCell if len(pts) ==", "= data2 data2 = [] print('Skipping WOFDs') for i in data: if 
not'WOFD'", "i in reversed(data): #grade = grade0 if name == i['pilot']: if i['ServerDate'] not", "'' textcolor = '#000000' edgecolor = '#708090' cell = tb.add_cell(0,0,10*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8))", "# maxDate = '' textcolor = '#000000' edgecolor = '#708090' cell = tb.add_cell(0,0,10*width,height,text='Callsign',loc='center',facecolor=blankcell)", "== 1: color=redcolor elif g == 2.0: color=browncolor elif g == 2.5: color=bluecolor", "rd: color = ij['bg'] if not color: color = blankcell text = ''", "data = data2 print('Number remaining: ', str(len(data))) pilots = [] pilotRows = {}", "'A-4E-C' elif str(sys.argv[1]) == 'harrier': airframe = 'AV8BNA' elif str(sys.argv[1]) == 'goshawk': airframe", "minRows < 12: minRows = 12 #for p_idx in range(0,len(pilots)): for p_idx in", "= '' gradeCell['bg'] = '#FFFFFF' pt = float(-1.0) for i in curList: if", "try: modification_time = os.path.getmtime(path) #print(\"Last modification time since the epoch:\", modification_time) except OSError:", "#print('Name: ' , name) name = name.replace('-', '') name = name.replace('_', '') name", "they will appear in our Greenie Board # set the default grade #grade0={};", "+ 1 print('Number of rows kept: ', str(count)) data = data2 print('size of", "loop through their grades and find their FIRST wire gradeCell = {} gradeCell['score']", "= data2 print('Number remaining: ', str(len(data))) pilots = [] pilotRows = {} pilotDict", "else: options['airframe'] = airframe options['squadron'] = squadron options['ruleset'] = ruleset options['maxRows']=10 options['maxCols']=17 plotSquadron(pilotRows,", "2: # print('Argument Number 2: ', str(sys.argv[1])) def updateDatabase(path): if not os.path.isfile(path) or", "' , name) # name = name.replace(squadron,'') # if the squadron was empty", "matplotlib.font_manager import FontProperties import 
numpy as np import statistics import sys, getopt from", "#print('Empty.') pts.append(i['points']) else: #print('not empty') if not i['finalscore']: gradeCell['score'] = i['points'] else: gradeCell['score']", "original data data = data2 data2 = [] print('Skipping WOFDs') for i in", "elif g == 2.0: color=browncolor elif g == 2.5: color=bluecolor elif g ==", "pull from google.') def updateFromGoogle(): try: scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive'] creds = ServiceAccountCredentials.from_json_keyfile_name('HypeManLSO-358d4493fc1d.json', scope)", "inaccessible\" %path) return ct-4000 return modification_time def calculateGradeCivilian(curList): gradeCell = {} gradeCell['score'] =", "'arguments.') #print ('Argument List:', str(sys.argv)) #if len(sys.argv)== 2: # print('Argument Number 2: ',", "# maxDate = data[0]['ServerDate'] # except: # minDate ='' # maxDate = ''", "= name.replace(']', '') name = name.replace('|', '') name = name.replace('\\\\', '') name =", "data2 = [] print('Searching for squadron: ' , squadron) for i in data:", "'#d17a00' # yellowcolor = '#b6c700' # greencolor = '#0bab35' # bluecolor = '#01A2EA'", "if i['case'] == 3 and not '3' in gradeCell['icon']: gradeCell['icon'] += '3' if", "cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') #titlestr = 'JOW Greenie Board ' + minDate + '", "= colorFromPoints(min(pts)) if i['case'] == 3: gradeCell['icon']+='3' return gradeCell if len(pts) == 0:", "return boardRow def CalculateAverageScore(pilotRow): score = 0.0 for i in pilotRow: score =", "= 0 if squadron != '': data2 = [] print('Searching for squadron: '", "tmp = float(i['points']) if tmp > pt: pt = tmp if tmp ==", "color = blankcell return color def calculateGradeTailhooker(curList): # loop through their grades and", "3), dpi=250) ax = fig.add_subplot(1,1,1) frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) 
frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax,", "= i['points'] else: gradeCell['score'] = i['finalscore'] pts.append(i['points']) gradeCell['bg'] = colorFromPoints(min(pts)) if i['case'] ==", "as plt import matplotlib.colors as mcolors from matplotlib.table import Table from matplotlib.font_manager import", "index != -1: data2.append(i) count = count + 1; #print('Keeping in squadron: '", "'') name = name.replace('@', '') name = name.lower() index = name.find(squadron) if index", "in uniqueDates: #print(i) curList = []; for j in data: if name ==", "i['points'] else: gradeCell['score'] = i['finalscore'] pts.append(i['points']) gradeCell['bg'] = colorFromPoints(min(pts)) if i['case'] == 3:", "currentMonth = datetime.now().month print('skipping landings not in current month') for i in data:", "= '' if '3' in ij['icon']: text = case3 elif '2' in ij['icon']:", "data: #print(i) idate = i['ServerDate'].split('/') imonth = int(idate[1]) if imonth == currentMonth: data2.append(i)", "= 'A-4E-C' elif str(sys.argv[1]) == 'harrier': airframe = 'AV8BNA' elif str(sys.argv[1]) == 'goshawk':", "= 1 pts.append(1) else: gradeCell['score'] = statistics.mean(pts) gradeCell['bg'] = colorFromPoints(min(pts)) return gradeCell def", "= maxLength+2 n_rows = len(pilots)+1 width, height = 100 / n_cols, 100.0 /", "#print('PlotSquadron') maxLength = 0 for i in pilotRows: if len(i) > maxLength: maxLength", "minDate = data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] #except: # minDate ='' # maxDate", "p_idx in range(0,minRows): row_idx = p_idx+1 rd = [] name = '' scoreText", "elif str(sys.argv[1]) == 'goshawk': airframe = 'T-45' print('Aircraft: ', airframe) if len(sys.argv) >=", "np import statistics import sys, getopt from oauth2client.service_account import ServiceAccountCredentials #from datetime import", "else: # curDate = i['ServerDate'] # grade = calculateGrade(curList, grade0) # boardRow.append(grade) #", "elif '2' in g['icon']: text = case2 cell = 
tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') #", "set defaults airframe = '' squadron = '' ruleset = 'best' #print('Length of", "plt.figure(dpi=150) ax = fig.add_subplot(1,1,1) frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0,", "= len(i) if maxLength < 17: maxLength = 17 fig = plt.figure(dpi=150) ax", "for col_idx in range(2,options['maxCols']+2): text = '' if count < len(titlestr): text =", "p_idx < len(pilots): name = pilots[p_idx] rd = pilotRows[name] # avg = statistics.mean(rd)", "'blankcell' if g == -1: color=bluegraycolor elif g == 0: color=blackcolor elif g", "google.') def updateFromGoogle(): try: scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive'] creds = ServiceAccountCredentials.from_json_keyfile_name('HypeManLSO-358d4493fc1d.json', scope) client =", "FIRST wire gradeCell = {} gradeCell['score'] = 1 gradeCell['icon'] = '' gradeCell['bg'] =", "ij['bg'] if not color: color = blankcell text = '' if '3' in", "pilots[p_idx] rd = pilotRows[name] # avg = statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText =", "anchor='⚓' goldstar = '⭐' goldstar = '★' case3 = '•' case3= '◉' case2", "= '#d17a00' yellowcolor = '#b6c700' greencolor = '#0bab35' # try: # minDate =", "name.replace('[', '') name = name.replace(']', '') name = name.replace('|', '') name = name.replace('\\\\',", "= len(i) if maxLength < options['maxRows']: maxLength = options['maxRows'] fig = plt.figure(figsize=(6, 3),", "= tb.add_cell(0, col_idx, width, height, text=text.upper(), loc='center', facecolor=blankcell) cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor)", "str(len(data))) pilots = [] pilotRows = {} pilotDict = {} # get the", "len(titlestr): text = 
titlestr[count] count = count + 1 cell = tb.add_cell(0, col_idx,", "= float(-1.0) for i in curList: if i['case'] == 3 and not '3'", "name = name.replace(squadron,'') # if the squadron was empty just keep the original", "= name.replace('|', '') name = name.replace('\\\\', '') name = name.replace('/', '') name =", "text = anchor elif '2' in g['icon']: text = case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color)", "# #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) def plotDefaultBoard(pilotRows,", "+ 1 cell = tb.add_cell(0, col_idx, width, height, text=text, loc='center', facecolor=blankcell) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor)", "= {} gradeCell['score'] = -1 gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF' pt =", "sys, getopt from oauth2client.service_account import ServiceAccountCredentials #from datetime import datetime #print ('Number of", "= colorFromPoints(min(pts)) return gradeCell def calculateGrade(curList, ruleset): if ruleset == 'best': return calculateGradeCivilian(curList)", "text='' # add the remaining cells to the end for f in range(col_idx,maxLength+2):", "name = name.replace('[', '') name = name.replace(']', '') name = name.replace('|', '') name", "datetime.now().month print('skipping landings not in current month') for i in data: #print(i) idate", "data2.append(i) data = data2 for i in reversed(data): name = i['pilot'] if name", "== -1: color=bluegraycolor elif g == 0: color=blackcolor elif g == 1: color=redcolor", "'#835C3B' orangecolor = '#d17a00' yellowcolor = '#b6c700' greencolor = '#0bab35' # try: #", "month data2 = [] if squadron == '': currentMonth = datetime.now().month print('skipping landings", "curList.append(i) # # else: # curDate = i['ServerDate'] # grade = 
calculateGrade(curList, grade0)", "float(-1.0) for i in curList: if i['case'] == 3 and not '3' in", "minDate ='' # maxDate = '' textcolor = '#000000' edgecolor = '#708090' cell", "cell = tb.add_cell(0,0,8*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) cell = tb.add_cell(0,1,2*width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor)", "text = case3 elif '5' in g['icon']: text = anchor elif '2' in", "print('skipping landings not in current month') for i in data: #print(i) idate =", "tb.add_cell(0, col_idx, width, height, text=text, loc='center', facecolor=blankcell) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') #", "= '#835C3B' # orangecolor = '#d17a00' # yellowcolor = '#b6c700' # greencolor =", "of arguments:', len(sys.argv), 'arguments.') #print ('Argument List:', str(sys.argv)) #if len(sys.argv)== 2: # print('Argument", "= '' ruleset = 'best' #print('Length of argv: ' , len(sys.argv)); if len(sys.argv)", "= data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] #except: # minDate ='' # maxDate =", "cell.set_linewidth(0.5) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonthSQ = datetime.now().month titlestr = ' '+options['squadron'] count =", "count = count + 1 print('Number of rows kept: ', str(count)) data =", "= pilotRows[name] # avg = statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText = round(avg,1) cell", "in ij['icon']: text = case3 elif '2' in ij['icon']: text = case2 cell", "grade0['grade']='--' # if squadron is empty then lets trim the landings not in", "# 
print('Deleting airframe: ', i['airframe'], ' was looking for: ' , airframe) #", "g == 2.0: color=browncolor elif g == 2.5: color=bluecolor elif g == 3.0:", "plotSquadron(pilotRows, options): #print('PlotSquadron') maxLength = 0 for i in pilotRows: if len(i) >", "cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') # titlestr = 'JOW Greenie Board ' + minDate", "pilotDict = {} # get the rows as they will appear in our", "#edgecolor='none' cell.get_text().set_color('#333412') # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"14\")) cell.set_edgecolor(edgecolor) col_idx = col_idx + 1 color =", "frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0, 0, 1, 1]) tb.auto_set_font_size(False)", "count + 1 print('Number of rows kept: ', str(count)) data = data2 print('size", "'' gradeCell['bg'] = '#FFFFFF' pts = [] count = 0 for i in", "= '' squadron = '' ruleset = 'best' #print('Length of argv: ' ,", "== currentMonthSQ: data2.append(i) data = data2 for i in reversed(data): name = i['pilot']", "color: color = blankcell text = '' if '3' in ij['icon']: text =", "i['ServerDate'] # grade = calculateGrade(curList, grade0) # boardRow.append(grade) # curList = []; #", "= \"SippyCup\" cell = tb.add_cell(row_idx,0,8*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.5\")) cell.set_edgecolor(edgecolor) # name = pilots[p_idx];", "= datetime.now().month titlestr = ' JOINT OPS WING' #+ str(currentMonth) + '/' +", "squadron: ' , name) # name = name.replace(squadron,'') # if the squadron was", "ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') 
plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) def plotDefaultBoard(pilotRows, options): maxLength = 0 for i", "bbox=[0, 0, 1, 1]) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows = len(pilots)+1 width, height", "= 0.0 for i in pilotRow: score = score + i['score'] finalscore =", "ruleset == 'first': return calculateGradeTailhooker(curList) def calculatePilotRow(data, name, ruleset): #print(name) boardRow = [];", "= calculatePilotRow(data, name, ruleset) #print(name,' score: ' , pilotRow) pilotRows[name] = (pilotRow) #", "not i['wire']: #print('Empty.') pts.append(i['points']) else: #print('not empty') if not i['finalscore']: gradeCell['score'] = i['points']", "col_idx = col_idx + 1 color = blankcell text='' # add the remaining", "= json.load(json_file) # go through and keep only a specified airframe data2 =", "sheet = client.open('HypeMan_LSO_Grades').sheet1 list_of_hashes = sheet.get_all_records() with open('data.txt', 'w') as outfile: json.dump(list_of_hashes, outfile)", "' , str(len(data))) count = 0 if squadron != '': data2 = []", "+ '/' + str(datetime.now().year) print(titlestr) count = 0 for col_idx in range(2,maxLength+2): text", "end for f in range(col_idx,options['maxCols']+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) # #tb.set_fontsize(7)", "'5.5' in g['icon']: text = shithot elif '3' in g['icon'] and '5' in", "i['ServerDate'] not in uniqueDates: uniqueDates.append(i['ServerDate']) for i in uniqueDates: #print(i) curList = [];", "import time from datetime import datetime import matplotlib.pyplot as plt import matplotlib.colors as", "print('Aircraft: ', airframe) if len(sys.argv) >= 3: ruleset = str(sys.argv[2]) if len(sys.argv) >=", "json.load(json_file) # go through and keep only a specified airframe data2 = data", "last refresh, skipping pull from google.') def updateFromGoogle(): try: scope = 
['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive'] creds", "data = data2 if squadron != '': currentMonthSQ = datetime.now().month print('skipping landings not", "pts.append(1) else: gradeCell['score'] = statistics.mean(pts) gradeCell['bg'] = colorFromPoints(min(pts)) return gradeCell def calculateGrade(curList, ruleset):", "airframe) for i in data: # if i['airframe'] #if i['airframe'] == airframe: if", "i['airframe'] == airframe: if i['airframe'] in airframe: data2.append(i) # print('Deleting airframe: ', i['airframe'],", "cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(size=\"6.0\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) col_idx = 2 for ij in rd: color =", "n_rows #height = height/10 shithot ='🎖️' anchor='⚓' goldstar = '⭐' goldstar = '★'", "print(titlestr) count = 0 for col_idx in range(2,maxLength+2): text = '' if count", "= sheet.get_all_records() with open('data.txt', 'w') as outfile: json.dump(list_of_hashes, outfile) except: print('Exception thrown in", "17 fig = plt.figure(dpi=150) ax = fig.add_subplot(1,1,1) frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb", "' JOINT OPS WING' #+ str(currentMonth) + '/' + str(datetime.now().year) print(titlestr) count =", "plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0, 0, 1, 1]) #tb.scale(0.25, 1) tb.auto_set_font_size(False)", "if name.lower() == 'eese': name = \"SippyCup\" cell = tb.add_cell(row_idx,0,8*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.5\"))", "maxLength: maxLength = len(i) if maxLength < options['maxRows']: maxLength = options['maxRows'] fig =", "= name.replace('/', '') name = name.replace('@', '') name = name.lower() index = name.find(squadron)", "'' if p_idx 
< len(pilots): name = pilots[p_idx] rd = pilotRows[name] #avg =", "ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) # set defaults airframe = '' squadron =", "i in data: name = i['pilot'] #print('Name: ' , name) name = name.replace('-',", "== 2 and not '2' in gradeCell['icon']: gradeCell['icon'] += '2' try: tmp =", "textcolor = '#000000' edgecolor = '#708090' cell = tb.add_cell(0,0,10*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor)", "data2 print('size of data array: ' , str(len(data))) count = 0 if squadron", "str(sys.argv[1]) == 'hornet': airframe = 'FA-18C_hornet' elif str(sys.argv[1]) == 'scooter': airframe = 'A-4E-C'", "str(sys.argv[1]) == 'harrier': airframe = 'AV8BNA' elif str(sys.argv[1]) == 'goshawk': airframe = 'T-45'", "'⭐' goldstar = '★' case3 = '•' case3= '◉' case2 = '⊙' case2", "cell.set_linewidth(0.5) # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"10\")) cell.set_edgecolor(edgecolor) col_idx = col_idx + 1 color = blankcell", "grades and find their FIRST wire gradeCell = {} gradeCell['score'] = 1 gradeCell['icon']", "g == 4.0: color = greencolor elif g == 4.5: color = greencolor", "for p_idx in range(0,minRows): row_idx = p_idx+1 rd = [] name = ''", "= data[0]['ServerDate'] # except: # minDate ='' # maxDate = '' textcolor =", "not '3' in gradeCell['icon']: gradeCell['icon'] += '3' if i['case'] == 2 and not", "0 for i in pilotRows: if len(i) > maxLength: maxLength = len(i) if", "facecolor=blankcell) cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) 
#cell.set_text_props(family='') #titlestr = 'JOW Greenie Board '", "blankcell text='' # add the remaining cells to the end for f in", "calculateGradeCivilian(curList) if ruleset == 'first': return calculateGradeTailhooker(curList) def calculatePilotRow(data, name, ruleset): #print(name) boardRow", "OSError: print(\"Path '%s' does not exists or is inaccessible\" %path) return ct-4000 return", "airframe: ', airframe) for i in data: # if i['airframe'] #if i['airframe'] ==", "not in the current month data2 = [] if squadron == '': currentMonth", "# name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,2*width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.4\")) cell.set_edgecolor(edgecolor) col_idx = 2", "{} gradeCell['score'] = -1 gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF' pt = float(-1.0)", "i['ServerDate'].split('/') imonth = int(idate[1]) if imonth == currentMonth: data2.append(i) data = data2 if", "data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] # except: # minDate ='' # maxDate =", "str(sys.argv[1])) def updateDatabase(path): if not os.path.isfile(path) or time.time() - getModificationTimeSeconds(path) > 1: print('Updating", "in data: if not'WOFD' in i['grade']: data2.append(i) data = data2 print('Number remaining: ',", "color = g['bg'] text = '' if '5.5' in g['icon']: text = shithot", "== 3: gradeCell['icon']+='3' return gradeCell if len(pts) == 0: gradeCell['score'] = 1 pts.append(1)", "scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive'] creds = ServiceAccountCredentials.from_json_keyfile_name('HypeManLSO-358d4493fc1d.json', scope) client = gspread.authorize(creds) sheet = client.open('HypeMan_LSO_Grades').sheet1", "4: squadron = str(sys.argv[3]); print('Squadron: ', squadron) print('Ruleset: ', ruleset) lsoData = 'data.txt'", "#unicorn='✈️' blankcell='#1A392A' 
#colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718', '#708286','#5F615E'] redcolor = '#a00000' bluegraycolor = '#708286'", "cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonth = datetime.now().month titlestr = ' JOINT OPS WING' #+", "plt.figure(figsize=(6, 3), dpi=250) ax = fig.add_subplot(1,1,1) frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb =", "pilots[p_idx]; cell = tb.add_cell(row_idx,1,2*width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.4\")) cell.set_edgecolor(edgecolor) col_idx = 2 for g in", "3 and not '3' in gradeCell['icon']: gradeCell['icon'] += '3' if i['case'] == 2", "#cell.set_fontsize(24) cell = tb.add_cell(0,1,width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonthSQ =", "tb.add_cell(0,1,width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonthSQ = datetime.now().month titlestr =", "== currentMonth: data2.append(i) data = data2 if squadron != '': currentMonthSQ = datetime.now().month", "gradeCell['score'] = -1 gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF' pt = float(-1.0) for", "= score/len(pilotRow) #print(finalscore) return finalscore def 
plotSquadron(pilotRows, options): #print('PlotSquadron') maxLength = 0 for", "idate = i['ServerDate'].split('/') imonth = int(idate[1]) if imonth == currentMonthSQ: data2.append(i) data =", "text = shithot elif '3' in g['icon'] and '5' in g['icon']: text =", "= '⭐' goldstar = '★' case3 = '•' case3= '◉' case2 = '⊙'", "= tmp if tmp == 5 and not '5' in gradeCell['icon']: gradeCell['icon']+= '5'", "if name not in pilots: pilots.append(name) pilotRow = calculatePilotRow(data, name, ruleset) #print(name,' score:", "squadron = '' ruleset = 'best' #print('Length of argv: ' , len(sys.argv)); if", "goldstar elif '3' in g['icon']: text = case3 elif '5' in g['icon']: text", "p_idx < len(pilots): name = pilots[p_idx] rd = pilotRows[name] #avg = statistics.mean(rd) avg", "elif str(sys.argv[1]) == 'hornet': airframe = 'FA-18C_hornet' elif str(sys.argv[1]) == 'scooter': airframe =", "looking for: ' , airframe) # data.remove(i) count = count + 1 print('Number", "#unicorn='✈️' blankcell='#FFFFFF' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718'] # redcolor = '#a00000' # browncolor =", "in airframe: data2.append(i) # print('Deleting airframe: ', i['airframe'], ' was looking for: '", "= float(i['points']) if tmp > pt: pt = tmp if tmp == 5", "3: gradeCell['icon']+='3' return gradeCell if len(pts) == 0: gradeCell['score'] = 1 pts.append(1) else:", "WING' #+ str(currentMonth) + '/' + str(datetime.now().year) print(titlestr) count = 0 for col_idx", "== i: curList.append(j) ithPilotGrade = calculateGrade(curList,ruleset) boardRow.append(ithPilotGrade) # if not haveDate: # curDate", "case2 = '○' #case2 = '○' #case2 = '∘' #unicorn='✈️' blankcell='#1A392A' #colors=['red','orange','orange','yellow','lightgreen'] #078a21", "data2 = data print('... 
size of data array: ' , str(len(data))) count =", "if p_idx < len(pilots): name = pilots[p_idx] rd = pilotRows[name] # avg =", "0 if airframe != '': data2 = [] print('Keeping only grades for airframe:", "= '#0bab35' bluecolor = '#01A2EA' blankcell='#FFFFFF' blackcolor = '#000000' color = 'blankcell' if", "cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"14\")) cell.set_edgecolor(edgecolor) col_idx = col_idx + 1 color = blankcell text='' #", "in reversed(data): #grade = grade0 if name == i['pilot']: if i['ServerDate'] not in", "#colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718', '#708286','#5F615E'] redcolor = '#a00000' bluegraycolor = '#708286' glossgraycolor =", "plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) # set defaults airframe = '' squadron", "#print(name,' score: ' , pilotRow) pilotRows[name] = (pilotRow) # pilotDict[name]=pilotRow options = {}", "to ' + maxDate minRows = len(pilots) if minRows < options['maxRows']: minRows =", "='🎖️' anchor='⚓' goldstar = '⭐' goldstar = '★' case3 = '•' case3= '◉'", "col_idx = 2 for ij in rd: color = ij['bg'] if not color:", "i in pilotRow: score = score + i['score'] finalscore = score/len(pilotRow) #print(finalscore) return", "name, ruleset) #print(name,' score: ' , pilotRow) pilotRows[name] = (pilotRow) # pilotDict[name]=pilotRow options", "from datetime import datetime import matplotlib.pyplot as plt import matplotlib.colors as mcolors from", "the end for f in range(col_idx,maxLength+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_edgecolor(edgecolor) #tb.set_fontsize(7) ax.add_table(tb)", "titlestr = ' JOINT 
OPS WING' #+ str(currentMonth) + '/' + str(datetime.now().year) print(titlestr)", "'w') as outfile: json.dump(list_of_hashes, outfile) except: print('Exception thrown in updateFromGoogle') return print('Local HypeMan", "cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.5\")) cell.set_edgecolor(edgecolor) # name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,2*width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.4\")) cell.set_edgecolor(edgecolor) col_idx", "pt: pt = tmp if tmp == 5 and not '5' in gradeCell['icon']:", "= statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText = round(avg,1) cell = tb.add_cell(row_idx,0,10*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor)", "client.open('HypeMan_LSO_Grades').sheet1 list_of_hashes = sheet.get_all_records() with open('data.txt', 'w') as outfile: json.dump(list_of_hashes, outfile) except: print('Exception", "/ n_cols, 100.0 / n_rows #height = height/10 shithot ='🎖️' anchor='⚓' goldstar =", "0 for i in curList: count = count + 1 #print(' Calculate grade", "bluecolor = '#01A2EA' blankcell='#FFFFFF' blackcolor = '#000000' color = 'blankcell' if g ==", "len(pilots) if minRows < 12: minRows = 12 #for p_idx in range(0,len(pilots)): for", "len(i) > maxLength: maxLength = len(i) if maxLength < options['maxRows']: maxLength = options['maxRows']", "fig = plt.figure(dpi=150) ax = fig.add_subplot(1,1,1) frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb =", "= CalculateAverageScore(rd) scoreText = round(avg,1) cell = tb.add_cell(row_idx,0,10*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7\")) 
cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5)", "for g in rd: color = g['bg'] text = '' if '5.5' in", "n_rows = len(pilots)+1 width, height = 100 / n_cols, 100.0 / n_rows #height", "count < len(titlestr): text = titlestr[count] count = count + 1 cell =", "is empty then lets trim the landings not in the current month data2", "case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') cell.set_linewidth(0.5) # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"10\")) cell.set_edgecolor(edgecolor) col_idx =", "# add the remaining cells to the end for f in range(col_idx,maxLength+2): cell", ", str(len(data))) count = 0 if squadron != '': data2 = [] print('Searching", "minRows = 12 #for p_idx in range(0,len(pilots)): for p_idx in range(0,minRows): row_idx =", "gradeCell['icon']: gradeCell['icon'] += '3' if i['case'] == 2 and not '2' in gradeCell['icon']:", "= i['ServerDate'] # grade = calculateGrade(curList, grade0) # boardRow.append(grade) # curList = [];", "', str(sys.argv[1])) def updateDatabase(path): if not os.path.isfile(path) or time.time() - getModificationTimeSeconds(path) > 1:", "!= '': data2 = [] print('Keeping only grades for airframe: ', airframe) for", "name) # name = name.replace(squadron,'') # if the squadron was empty just keep", "modification_time def calculateGradeCivilian(curList): gradeCell = {} gradeCell['score'] = -1 gradeCell['icon'] = '' gradeCell['bg']", "ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) def plotDefaultBoard(pilotRows, options): maxLength =", "Table from matplotlib.font_manager import FontProperties import numpy as np import statistics import sys,", "cell = 
tb.add_cell(0,1,width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonthSQ = datetime.now().month", "getopt from oauth2client.service_account import ServiceAccountCredentials #from datetime import datetime #print ('Number of arguments:',", "= p_idx+1 rd = [] name = '' scoreText = '' if p_idx", "'best': return calculateGradeCivilian(curList) if ruleset == 'first': return calculateGradeTailhooker(curList) def calculatePilotRow(data, name, ruleset):", "in range(0,len(pilots)): for p_idx in range(0,minRows): row_idx = p_idx+1 rd = [] name", "5 and not '5' in gradeCell['icon']: gradeCell['icon']+= '5' except: pt=0 gradeCell['bg'] = colorFromPoints(pt)", "if airframe != '': data2 = [] print('Keeping only grades for airframe: ',", "'★' case3 = '•' case3= '◉' case2 = '⊙' case2 = '○' #case2", "elif '3' in g['icon'] and '5' in g['icon']: text = goldstar elif '3'", "cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"10\")) cell.set_edgecolor(edgecolor) col_idx = col_idx + 1 color = blankcell text='' #", "= '#d17a00' # yellowcolor = '#b6c700' # greencolor = '#0bab35' # bluecolor =", "= 'blankcell' if g == -1: color=bluegraycolor elif g == 0: color=blackcolor elif", "elif g == 5.5: color = bluegraycolor else: color = blankcell return color", "if not haveDate: # curDate = i['ServerDate'] # haveDate = True # #", "numpy as np import statistics import sys, getopt from oauth2client.service_account import ServiceAccountCredentials #from", "# bluecolor = '#01A2EA' #try: # minDate = data[-1]['ServerDate'] # maxDate = data[0]['ServerDate']", "> maxLength: maxLength = len(i) if maxLength < options['maxRows']: maxLength = options['maxRows'] fig", "arguments:', len(sys.argv), 'arguments.') #print 
('Argument List:', str(sys.argv)) #if len(sys.argv)== 2: # print('Argument Number", "'#a00000' # browncolor = '#835C3B' # orangecolor = '#d17a00' # yellowcolor = '#b6c700'", "= '#b6c700' # greencolor = '#0bab35' # bluecolor = '#01A2EA' #try: # minDate", "airframe = 'FA-18C_hornet' elif str(sys.argv[1]) == 'scooter': airframe = 'A-4E-C' elif str(sys.argv[1]) ==", "= statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText = round(avg,1) if name.lower() == 'eese': name", "'hornet': airframe = 'FA-18C_hornet' elif str(sys.argv[1]) == 'scooter': airframe = 'A-4E-C' elif str(sys.argv[1])", "cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') #titlestr = 'JOW Greenie Board ' + minDate +", "'') name = name.replace('/', '') name = name.replace('@', '') name = name.lower() index", "'first': return calculateGradeTailhooker(curList) def calculatePilotRow(data, name, ruleset): #print(name) boardRow = []; uniqueDates =", "= round(avg,1) cell = tb.add_cell(row_idx,0,10*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) # name =", "'' if '3' in ij['icon']: text = case3 elif '2' in ij['icon']: text", "get the rows as they will appear in our Greenie Board # set", "one hour since last refresh, skipping pull from google.') def updateFromGoogle(): try: scope", "', count) # skip WOFDS if 'WOFD' in i['grade']: continue if not i['wire']:", "'∘' #unicorn='✈️' blankcell='#FFFFFF' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718'] # redcolor = '#a00000' # browncolor", "cell = 
tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) # #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([])", "tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) # #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w')", "tb.add_cell(0,1,2*width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonth = datetime.now().month titlestr = '", "name = name.replace('/', '') name = name.replace('@', '') name = name.lower() index =", "if 'WOFD' in i['grade']: continue if not i['wire']: #print('Empty.') pts.append(i['points']) else: #print('not empty')", "for i in data: if not'WOFD' in i['grade']: data2.append(i) data = data2 print('Number", "pilots: pilots.append(name) pilotRow = calculatePilotRow(data, name, ruleset) #print(name,' score: ' , pilotRow) pilotRows[name]", "'#708090' cell = tb.add_cell(0,0,8*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) cell = tb.add_cell(0,1,2*width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none'", "size of data array: ' , str(len(data))) count = 0 if airframe !=", "'○' #case2 = '○' #case2 = '∘' #unicorn='✈️' blankcell='#1A392A' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 
#colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718',", "kept: ', str(count)) data = data2 print('size of data array: ' , str(len(data)))", "== 4.5: color = greencolor elif g == 5: color = greencolor elif", "minRows = len(pilots) if minRows < 12: minRows = 12 #for p_idx in", "= 'FA-18C_hornet' elif str(sys.argv[1]) == 'scooter': airframe = 'A-4E-C' elif str(sys.argv[1]) == 'harrier':", "#edgecolor='none' cell.set_edgecolor(edgecolor) #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) #", "return color def calculateGradeTailhooker(curList): # loop through their grades and find their FIRST", "'⊙' case2 = '○' #case2 = '○' #case2 = '∘' #unicorn='✈️' blankcell='#FFFFFF' #colors=['red','orange','orange','yellow','lightgreen']", "= plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0, 0, 1, 1]) tb.auto_set_font_size(False) n_cols", "currentMonth = datetime.now().month titlestr = ' JOINT OPS WING' #+ str(currentMonth) + '/'", "LSO grade database updated from Google Sheets.') def getModificationTimeSeconds(path): ct = time.time() try:", "i in curList: count = count + 1 #print(' Calculate grade iteration: ',", "calculateGradeCivilian(curList): gradeCell = {} gradeCell['score'] = -1 gradeCell['icon'] = '' gradeCell['bg'] = '#FFFFFF'", "updateFromGoogle') return print('Local HypeMan LSO grade database updated from Google Sheets.') def getModificationTimeSeconds(path):", "datetime #print ('Number of arguments:', len(sys.argv), 'arguments.') #print ('Argument List:', str(sys.argv)) #if len(sys.argv)==", "plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0, 0, 1, 1]) 
tb.auto_set_font_size(False) n_cols =", "airframe) # data.remove(i) count = count + 1 print('Number of rows kept: ',", "the default grade #grade0={}; grade0['color']='white'; grade0['score']=0.0; grade0['symbol']='x'; grade0['grade']='--' # if squadron is empty", "name = name.replace('\\\\', '') name = name.replace('/', '') name = name.replace('@', '') name", "2: if str(sys.argv[1]) == 'turkey': airframe = ['F-14B', 'F-14A-135-GR'] elif str(sys.argv[1]) == 'hornet':", "cell.set_linewidth(0.5) col_idx = 2 for ij in rd: color = ij['bg'] if not", "+ 1; #print('Keeping in squadron: ' , name) # name = name.replace(squadron,'') #", "gspread.authorize(creds) sheet = client.open('HypeMan_LSO_Grades').sheet1 list_of_hashes = sheet.get_all_records() with open('data.txt', 'w') as outfile: json.dump(list_of_hashes,", "the original data data = data2 data2 = [] print('Skipping WOFDs') for i", "len(i) if maxLength < options['maxRows']: maxLength = options['maxRows'] fig = plt.figure(figsize=(6, 3), dpi=250)", "updateDatabase(path): if not os.path.isfile(path) or time.time() - getModificationTimeSeconds(path) > 1: print('Updating from Google.')", "cells to the end for f in range(col_idx,maxLength+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_edgecolor(edgecolor)", ", len(sys.argv)); if len(sys.argv) >= 2: if str(sys.argv[1]) == 'turkey': airframe = ['F-14B',", "'#0bab35' # bluecolor = '#01A2EA' #try: # minDate = data[-1]['ServerDate'] # maxDate =", "count = 0 for col_idx in range(2,options['maxCols']+2): text = '' if count <", "%path) return ct-4000 return modification_time def calculateGradeCivilian(curList): gradeCell = {} gradeCell['score'] = -1", "' was looking for: ' , airframe) # data.remove(i) count = count +", "grade0['symbol']='x'; grade0['grade']='--' # if squadron is empty then lets trim the landings not", "in current month') for i in data: #print(i) idate = i['ServerDate'].split('/') imonth =", "WOFDs') 
for i in data: if not'WOFD' in i['grade']: data2.append(i) data = data2", "text = case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') cell.set_linewidth(0.5) # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"10\")) cell.set_edgecolor(edgecolor)", "uniqueDates = [] for i in reversed(data): #grade = grade0 if name ==", "datetime import datetime #print ('Number of arguments:', len(sys.argv), 'arguments.') #print ('Argument List:', str(sys.argv))", "in i['grade']: continue if not i['wire']: #print('Empty.') pts.append(i['points']) else: #print('not empty') if not", "except: pt=0 gradeCell['bg'] = colorFromPoints(pt) gradeCell['score'] = pt # if not gradeCell['score']: #", "cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') cell.set_linewidth(0.5) # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"10\")) cell.set_edgecolor(edgecolor) col_idx = col_idx", "len(pilots): name = pilots[p_idx] rd = pilotRows[name] # avg = statistics.mean(rd) avg =", "100.0 / n_rows shithot ='🎖️' anchor='⚓' goldstar = '⭐' goldstar = '★' case3", "JOINT OPS WING' #+ str(currentMonth) + '/' + str(datetime.now().year) print(titlestr) count = 0", "and not '2' in gradeCell['icon']: gradeCell['icon'] += '2' try: tmp = float(i['points']) if", "# minDate = data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] #except: # minDate ='' #", "i['airframe'], ' was looking for: ' , airframe) # data.remove(i) count = count", "ServiceAccountCredentials.from_json_keyfile_name('HypeManLSO-358d4493fc1d.json', scope) client = gspread.authorize(creds) sheet = client.open('HypeMan_LSO_Grades').sheet1 list_of_hashes = sheet.get_all_records() with open('data.txt',", "with open('data.txt', 'w') as outfile: json.dump(list_of_hashes, 
outfile) except: print('Exception thrown in updateFromGoogle') return", "'#b6c700' greencolor = '#0bab35' bluecolor = '#01A2EA' blankcell='#FFFFFF' blackcolor = '#000000' color =", "score/len(pilotRow) #print(finalscore) return finalscore def plotSquadron(pilotRows, options): #print('PlotSquadron') maxLength = 0 for i", "< len(pilots): name = pilots[p_idx] rd = pilotRows[name] #avg = statistics.mean(rd) avg =", "def CalculateAverageScore(pilotRow): score = 0.0 for i in pilotRow: score = score +", "= yellowcolor elif g == 4.0: color = greencolor elif g == 4.5:", "gradeCell['bg'] = colorFromPoints(pt) gradeCell['score'] = pt # if not gradeCell['score']: # print('what') return", "in uniqueDates: uniqueDates.append(i['ServerDate']) for i in uniqueDates: #print(i) curList = []; for j", "colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718', '#708286','#5F615E'] redcolor = '#a00000' bluegraycolor = '#708286' glossgraycolor = '#5F615E' browncolor =", "= tb.add_cell(0, col_idx, width, height, text=text, loc='center', facecolor=blankcell) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='')", "only a specified airframe data2 = data print('... 
size of data array: '", "'5' except: pt=0 gradeCell['bg'] = colorFromPoints(pt) gradeCell['score'] = pt # if not gradeCell['score']:", "text=text, loc='center', facecolor=blankcell) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') # titlestr = 'JOW Greenie", "cell = tb.add_cell(0, col_idx, width, height, text=text, loc='center', facecolor=blankcell) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor)", "uniqueDates.append(i['ServerDate']) for i in uniqueDates: #print(i) curList = []; for j in data:", "outfile: json.dump(list_of_hashes, outfile) except: print('Exception thrown in updateFromGoogle') return print('Local HypeMan LSO grade", "# if not haveDate: # curDate = i['ServerDate'] # haveDate = True #", "'3' in g['icon'] and '5' in g['icon']: text = goldstar elif '3' in", "fig.add_subplot(1,1,1) frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0, 0, 1, 1])", "cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonthSQ = datetime.now().month titlestr = ' '+options['squadron'] count = 0", "rd = [] name = '' scoreText = '' if p_idx < len(pilots):", "end for f in range(col_idx,maxLength+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_edgecolor(edgecolor) #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off()", "[] print('Searching for squadron: ' , squadron) for i in data: name =", "landings not in the current month data2 = [] if squadron == '':", "def updateDatabase(path): if not os.path.isfile(path) or time.time() - getModificationTimeSeconds(path) > 1: print('Updating from", 
"len(sys.argv)== 2: # print('Argument Number 2: ', str(sys.argv[1])) def updateDatabase(path): if not os.path.isfile(path)", "pts = [] count = 0 for i in curList: count = count", "= '#a00000' # browncolor = '#835C3B' # orangecolor = '#d17a00' # yellowcolor =", "= case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412') cell.set_linewidth(0.5) # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"10\")) cell.set_edgecolor(edgecolor) col_idx", "height = 100 / n_cols, 100.0 / n_rows shithot ='🎖️' anchor='⚓' goldstar =", "= 17 fig = plt.figure(dpi=150) ax = fig.add_subplot(1,1,1) frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([])", "cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) #cell.set_fontsize(24) cell = tb.add_cell(0,1,width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8))", "g == 0: color=blackcolor elif g == 1: color=redcolor elif g == 2.0:", "< len(pilots): name = pilots[p_idx] rd = pilotRows[name] # avg = statistics.mean(rd) avg", "as np import statistics import sys, getopt from oauth2client.service_account import ServiceAccountCredentials #from datetime", "= g['bg'] text = '' if '5.5' in g['icon']: text = shithot elif", "FontProperties import numpy as np import statistics import sys, getopt from oauth2client.service_account import", "calculatePilotRow(data, name, ruleset) #print(name,' score: ' , pilotRow) pilotRows[name] = (pilotRow) # pilotDict[name]=pilotRow", "maxLength = options['maxRows'] fig = plt.figure(figsize=(6, 3), dpi=250) ax = fig.add_subplot(1,1,1) frame1 =", "1: 
color=redcolor elif g == 2.0: color=browncolor elif g == 2.5: color=bluecolor elif", "n_rows = len(pilots)+1 width, height = 100 / n_cols, 100.0 / n_rows shithot", "modification_time = os.path.getmtime(path) #print(\"Last modification time since the epoch:\", modification_time) except OSError: print(\"Path", "== 'scooter': airframe = 'A-4E-C' elif str(sys.argv[1]) == 'harrier': airframe = 'AV8BNA' elif", "if i['case'] == 3: gradeCell['icon']+='3' return gradeCell if len(pts) == 0: gradeCell['score'] =", "# haveDate = True # # if curDate == i['ServerDate']: # curList.append(i) #", "color=redcolor elif g == 2.0: color=browncolor elif g == 2.5: color=bluecolor elif g", ", name) # name = name.replace(squadron,'') # if the squadron was empty just", "if not os.path.isfile(path) or time.time() - getModificationTimeSeconds(path) > 1: print('Updating from Google.') updateFromGoogle()", "cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) cell = tb.add_cell(0,1,2*width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24)", "= plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0, 0, 1, 1]) #tb.scale(0.25, 1)", "tb.add_cell(0, col_idx, width, height, text=text.upper(), loc='center', facecolor=blankcell) cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='')", "or is inaccessible\" %path) return ct-4000 return modification_time def calculateGradeCivilian(curList): gradeCell = {}", "time.time() - getModificationTimeSeconds(path) > 1: 
print('Updating from Google.') updateFromGoogle() else: print('Less than one", "== 3.0: color = yellowcolor elif g == 4.0: color = greencolor elif", "='' # maxDate = '' textcolor = '#FFFFF0' edgecolor = '#708090' cell =", "= blankcell text='' # add the remaining cells to the end for f", "rows as they will appear in our Greenie Board # set the default", "= '#01A2EA' blankcell='#FFFFFF' blackcolor = '#000000' color = 'blankcell' if g == -1:", "== 2.5: color=bluecolor elif g == 3.0: color = yellowcolor elif g ==", "+ 1 #print(' Calculate grade iteration: ', count) # skip WOFDS if 'WOFD'", "'#708286' glossgraycolor = '#5F615E' browncolor = '#835C3B' orangecolor = '#d17a00' yellowcolor = '#b6c700'", "rd = pilotRows[name] # avg = statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText = round(avg,1)", "len(pts) == 0: gradeCell['score'] = 1 pts.append(1) else: gradeCell['score'] = statistics.mean(pts) gradeCell['bg'] =", "True # # if curDate == i['ServerDate']: # curList.append(i) # # else: #", "bbox=[0, 0, 1, 1]) #tb.scale(0.25, 1) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows = len(pilots)+1", "name = i['pilot'] if name not in pilots: pilots.append(name) pilotRow = calculatePilotRow(data, name,", "range(col_idx,maxLength+2): cell = tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_edgecolor(edgecolor) #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([])", "1 print('Number of rows kept: ', str(count)) data = data2 print('size of data", "= [] if squadron == '': currentMonth = datetime.now().month print('skipping landings not in", "row_idx = p_idx+1 rd = [] name = '' scoreText = '' if", "ij['icon']: text = case3 elif '2' in ij['icon']: text = case2 cell =", "squadron) print('Ruleset: ', ruleset) lsoData = 'data.txt' updateDatabase(lsoData) with open('data.txt') as json_file: data", "anchor elif '2' in g['icon']: text 
= case2 cell = tb.add_cell(row_idx,col_idx,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.get_text().set_color('#333412')", "cell.set_edgecolor(edgecolor) #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) # set", "cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonth = datetime.now().month titlestr = ' JOINT OPS WING' #+ str(currentMonth)", "count = 0 for i in curList: count = count + 1 #print('", "options['maxRows'] #for p_idx in range(0,len(pilots)): for p_idx in range(0,minRows): row_idx = p_idx+1 rd", "1]) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows = len(pilots)+1 width, height = 100 /", "= 100 / n_cols, 100.0 / n_rows shithot ='🎖️' anchor='⚓' goldstar = '⭐'", "def updateFromGoogle(): try: scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive'] creds = ServiceAccountCredentials.from_json_keyfile_name('HypeManLSO-358d4493fc1d.json', scope) client = gspread.authorize(creds)", "gradeCell['icon']+='3' return gradeCell if len(pts) == 0: gradeCell['score'] = 1 pts.append(1) else: gradeCell['score']", "g['bg'] text = '' if '5.5' in g['icon']: text = shithot elif '3'", "Table(ax, bbox=[0, 0, 1, 1]) #tb.scale(0.25, 1) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows =", "of rows kept: ', str(count)) data = data2 print('size of data array: '", "range(2,options['maxCols']+2): text = '' if count < len(titlestr): text = titlestr[count] count =", "= name.replace('\\\\', '') name = name.replace('/', '') name = name.replace('@', '') name =", "len(sys.argv), 'arguments.') #print ('Argument List:', str(sys.argv)) #if len(sys.argv)== 2: # print('Argument Number 2:", "len(sys.argv) >= 2: if str(sys.argv[1]) == 'turkey': airframe = ['F-14B', 'F-14A-135-GR'] elif str(sys.argv[1])", "5.5: 
color = bluegraycolor else: color = blankcell return color def calculateGradeTailhooker(curList): #", "ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) # set defaults airframe = '' squadron = ''", "def calculateGrade(curList, ruleset): if ruleset == 'best': return calculateGradeCivilian(curList) if ruleset == 'first':", "blankcell='#FFFFFF' blackcolor = '#000000' color = 'blankcell' if g == -1: color=bluegraycolor elif", "from matplotlib.table import Table from matplotlib.font_manager import FontProperties import numpy as np import", "maxDate minRows = len(pilots) if minRows < options['maxRows']: minRows = options['maxRows'] #for p_idx", "os import time from datetime import datetime import matplotlib.pyplot as plt import matplotlib.colors", "if tmp > pt: pt = tmp if tmp == 5 and not", "since last refresh, skipping pull from google.') def updateFromGoogle(): try: scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']", "'◉' case2 = '⊙' case2 = '○' #case2 = '○' #case2 = '∘'", "Greenie Board # set the default grade #grade0={}; grade0['color']='white'; grade0['score']=0.0; grade0['symbol']='x'; grade0['grade']='--' #", "2 for ij in rd: color = ij['bg'] if not color: color =", "color = greencolor elif g == 5.5: color = bluegraycolor else: color =", "exists or is inaccessible\" %path) return ct-4000 return modification_time def calculateGradeCivilian(curList): gradeCell =", "'#b6c700' greencolor = '#0bab35' # try: # minDate = data[-1]['ServerDate'] # maxDate =", "statistics.mean(rd) avg = CalculateAverageScore(rd) scoreText = round(avg,1) cell = tb.add_cell(row_idx,0,10*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7\"))", "i['case'] == 2 and not '2' in gradeCell['icon']: gradeCell['icon'] += '2' 
try: tmp", "name, ruleset): #print(name) boardRow = []; uniqueDates = [] for i in reversed(data):", "tb = Table(ax, bbox=[0, 0, 1, 1]) #tb.scale(0.25, 1) tb.auto_set_font_size(False) n_cols = maxLength+2", "cell.set_linewidth(0.5) #cell.set_fontsize(24) cell = tb.add_cell(0,1,width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonthSQ", "# cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"14\")) cell.set_edgecolor(edgecolor) col_idx = col_idx + 1 color = blankcell text=''", "#print('Keeping in squadron: ' , name) # name = name.replace(squadron,'') # if the", "cell = tb.add_cell(0, col_idx, width, height, text=text.upper(), loc='center', facecolor=blankcell) cell.set_linewidth(0.5) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8))", "data2 data2 = [] print('Skipping WOFDs') for i in data: if not'WOFD' in", "+ ' to ' + maxDate minRows = len(pilots) if minRows < 12:", "cell.get_text().set_color('#333412') # cell.auto_set_font_size() cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"14\")) cell.set_edgecolor(edgecolor) col_idx = col_idx + 1 color = blankcell", "plt.savefig('board.png',transparent=False,bbox_inches='tight', pad_inches=0) def plotDefaultBoard(pilotRows, options): maxLength = 0 for i in pilotRows: if", "pilots = [] pilotRows = {} pilotDict = {} # get the rows", "facecolor=blankcell) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') # titlestr = 'JOW Greenie Board '", "= shithot elif '3' in g['icon'] and '5' in 
g['icon']: text = goldstar", "gspread import json import os import time from datetime import datetime import matplotlib.pyplot", "= '#0bab35' # try: # minDate = data[-1]['ServerDate'] # maxDate = data[0]['ServerDate'] #", "pilotRow) pilotRows[name] = (pilotRow) # pilotDict[name]=pilotRow options = {} if squadron == '':", "#cell.set_fontsize(24) currentMonthSQ = datetime.now().month titlestr = ' '+options['squadron'] count = 0 for col_idx", "width, height = 100 / n_cols, 100.0 / n_rows #height = height/10 shithot", "elif g == 4.0: color = greencolor elif g == 4.5: color =", "if maxLength < 17: maxLength = 17 fig = plt.figure(dpi=150) ax = fig.add_subplot(1,1,1)", "if len(sys.argv) >= 2: if str(sys.argv[1]) == 'turkey': airframe = ['F-14B', 'F-14A-135-GR'] elif", "if count < len(titlestr): text = titlestr[count] count = count + 1 cell", "keep the original data data = data2 data2 = [] print('Skipping WOFDs') for", "'') name = name.replace('\\\\', '') name = name.replace('/', '') name = name.replace('@', '')", "17: maxLength = 17 fig = plt.figure(dpi=150) ax = fig.add_subplot(1,1,1) frame1 = plt.gca()", "'#5F615E' browncolor = '#835C3B' orangecolor = '#d17a00' yellowcolor = '#b6c700' greencolor = '#0bab35'", "argv: ' , len(sys.argv)); if len(sys.argv) >= 2: if str(sys.argv[1]) == 'turkey': airframe", "col_idx in range(2,options['maxCols']+2): text = '' if count < len(titlestr): text = titlestr[count]", "= os.path.getmtime(path) #print(\"Last modification time since the epoch:\", modification_time) except OSError: print(\"Path '%s'", "case3= '◉' case2 = '⊙' case2 = '○' #case2 = '○' #case2 =", "= '#b6c700' greencolor = '#0bab35' # try: # minDate = data[-1]['ServerDate'] # maxDate", "pt = tmp if tmp == 5 and not '5' in gradeCell['icon']: gradeCell['icon']+=", "-1: color=bluegraycolor elif g == 0: color=blackcolor elif g == 1: color=redcolor elif", "def calculateGradeTailhooker(curList): # loop through their grades and find their FIRST wire gradeCell", "in 
updateFromGoogle') return print('Local HypeMan LSO grade database updated from Google Sheets.') def", "/ n_rows #height = height/10 shithot ='🎖️' anchor='⚓' goldstar = '⭐' goldstar =", "= (pilotRow) # pilotDict[name]=pilotRow options = {} if squadron == '': plotDefaultBoard(pilotRows, options)", "#colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718', '#708286','#5F615E'] redcolor = '#a00000' bluegraycolor = '#708286' glossgraycolor = '#5F615E' browncolor", "curList: if i['case'] == 3 and not '3' in gradeCell['icon']: gradeCell['icon'] += '3'", "# curList = []; # curList.append(i) #print(boardRow) return boardRow def CalculateAverageScore(pilotRow): score =", "and not '5' in gradeCell['icon']: gradeCell['icon']+= '5' except: pt=0 gradeCell['bg'] = colorFromPoints(pt) gradeCell['score']", "= [] print('Searching for squadron: ' , squadron) for i in data: name", "statistics import sys, getopt from oauth2client.service_account import ServiceAccountCredentials #from datetime import datetime #print", "#titlestr = 'JOW Greenie Board ' + minDate + ' to ' +", "return gradeCell def colorFromPoints(g): bluegraycolor = '#708286' glossgraycolor = '#5F615E' redcolor = '#ED1B24'", "#case2 = '∘' #unicorn='✈️' blankcell='#FFFFFF' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718'] # redcolor = '#a00000'", "minDate ='' # maxDate = '' textcolor = '#FFFFF0' edgecolor = '#708090' cell", "pad_inches=0) def plotDefaultBoard(pilotRows, options): maxLength = 0 for i in pilotRows: if len(i)", "1 color = blankcell text='' # add the remaining cells to the end", "' , squadron) for i in data: name = i['pilot'] #print('Name: ' ,", "options['maxRows'] fig = plt.figure(figsize=(6, 3), dpi=250) ax = fig.add_subplot(1,1,1) frame1 
= plt.gca() frame1.axes.get_xaxis().set_ticks([])", "1, 1]) tb.auto_set_font_size(False) n_cols = maxLength+2 n_rows = len(pilots)+1 width, height = 100", "'○' #case2 = '∘' #unicorn='✈️' blankcell='#FFFFFF' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718'] # redcolor =", "return finalscore def plotSquadron(pilotRows, options): #print('PlotSquadron') maxLength = 0 for i in pilotRows:", "[] for i in reversed(data): #grade = grade0 if name == i['pilot']: if", "cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) #cell.set_fontsize(24) cell = tb.add_cell(0,1,width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor)", "in pilotRow: score = score + i['score'] finalscore = score/len(pilotRow) #print(finalscore) return finalscore", "updateFromGoogle(): try: scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive'] creds = ServiceAccountCredentials.from_json_keyfile_name('HypeManLSO-358d4493fc1d.json', scope) client = gspread.authorize(creds) sheet", "= i['pilot'] if name not in pilots: pilots.append(name) pilotRow = calculatePilotRow(data, name, ruleset)", "str(len(data))) count = 0 if airframe != '': data2 = [] print('Keeping only", "edgecolor = '#708090' cell = tb.add_cell(0,0,10*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) #cell.set_fontsize(24) cell", "str(sys.argv[1]) == 'goshawk': airframe = 
'T-45' print('Aircraft: ', airframe) if len(sys.argv) >= 3:", "squadron: ' , squadron) for i in data: name = i['pilot'] #print('Name: '", "def calculateGradeCivilian(curList): gradeCell = {} gradeCell['score'] = -1 gradeCell['icon'] = '' gradeCell['bg'] =", "color=blackcolor elif g == 1: color=redcolor elif g == 2.0: color=browncolor elif g", "'3' in gradeCell['icon']: gradeCell['icon'] += '3' if i['case'] == 2 and not '2'", "airframe: ', i['airframe'], ' was looking for: ' , airframe) # data.remove(i) count", "in curList: if i['case'] == 3 and not '3' in gradeCell['icon']: gradeCell['icon'] +=", "ruleset == 'best': return calculateGradeCivilian(curList) if ruleset == 'first': return calculateGradeTailhooker(curList) def calculatePilotRow(data,", "WOFDS if 'WOFD' in i['grade']: continue if not i['wire']: #print('Empty.') pts.append(i['points']) else: #print('not", "skipping pull from google.') def updateFromGoogle(): try: scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive'] creds = ServiceAccountCredentials.from_json_keyfile_name('HypeManLSO-358d4493fc1d.json',", "# orangecolor = '#d17a00' # yellowcolor = '#b6c700' # greencolor = '#0bab35' #", "ServiceAccountCredentials #from datetime import datetime #print ('Number of arguments:', len(sys.argv), 'arguments.') #print ('Argument", "if str(sys.argv[1]) == 'turkey': airframe = ['F-14B', 'F-14A-135-GR'] elif str(sys.argv[1]) == 'hornet': airframe", "> 1: print('Updating from Google.') updateFromGoogle() else: print('Less than one hour since last", "+ minDate + ' to ' + maxDate minRows = len(pilots) if minRows", "elif str(sys.argv[1]) == 'scooter': airframe = 'A-4E-C' elif str(sys.argv[1]) == 'harrier': airframe =", "!= -1: data2.append(i) count = count + 1; #print('Keeping in squadron: ' ,", "ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight', 
pad_inches=0) # set defaults airframe = ''", "= int(idate[1]) if imonth == currentMonth: data2.append(i) data = data2 if squadron !=", "squadron != '': currentMonthSQ = datetime.now().month print('skipping landings not in current month') for", "not in pilots: pilots.append(name) pilotRow = calculatePilotRow(data, name, ruleset) #print(name,' score: ' ,", "#cell.set_fontsize(24) cell = tb.add_cell(0,1,2*width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonth = datetime.now().month", "g['icon'] and '5' in g['icon']: text = goldstar elif '3' in g['icon']: text", "titlestr[count] count = count + 1 cell = tb.add_cell(0, col_idx, width, height, text=text.upper(),", "color = 'blankcell' if g == -1: color=bluegraycolor elif g == 0: color=blackcolor", "#for p_idx in range(0,len(pilots)): for p_idx in range(0,minRows): row_idx = p_idx+1 rd =", "tb.add_cell(row_idx,0,8*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.5\")) cell.set_edgecolor(edgecolor) # name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,2*width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor)", "'○' #case2 = '∘' #unicorn='✈️' blankcell='#1A392A' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718', '#708286','#5F615E'] redcolor =", "color = bluegraycolor else: color = blankcell return color def calculateGradeTailhooker(curList): # loop", "import matplotlib.colors as mcolors from matplotlib.table import Table from 
matplotlib.font_manager import FontProperties import", "trim the landings not in the current month data2 = [] if squadron", "'5' in gradeCell['icon']: gradeCell['icon']+= '5' except: pt=0 gradeCell['bg'] = colorFromPoints(pt) gradeCell['score'] = pt", "= []; for j in data: if name == j['pilot'] and j['ServerDate'] ==", "#print(i) idate = i['ServerDate'].split('/') imonth = int(idate[1]) if imonth == currentMonth: data2.append(i) data", "= data2 for i in reversed(data): name = i['pilot'] if name not in", "# skip WOFDS if 'WOFD' in i['grade']: continue if not i['wire']: #print('Empty.') pts.append(i['points'])", "case3 elif '5' in g['icon']: text = anchor elif '2' in g['icon']: text", "print('Number of rows kept: ', str(count)) data = data2 print('size of data array:", "getModificationTimeSeconds(path): ct = time.time() try: modification_time = os.path.getmtime(path) #print(\"Last modification time since the", "range(2,maxLength+2): text = '' if count < len(titlestr): text = titlestr[count] count =", "def colorFromPoints(g): bluegraycolor = '#708286' glossgraycolor = '#5F615E' redcolor = '#ED1B24' browncolor =", "if len(i) > maxLength: maxLength = len(i) if maxLength < options['maxRows']: maxLength =", "def getModificationTimeSeconds(path): ct = time.time() try: modification_time = os.path.getmtime(path) #print(\"Last modification time since", "= greencolor elif g == 4.5: color = greencolor elif g == 5:", "1 cell = tb.add_cell(0, col_idx, width, height, text=text, loc='center', facecolor=blankcell) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8))", "= '#FFFFFF' pts = [] count = 0 for i in curList: count", "color=bluecolor elif g == 3.0: color = yellowcolor elif g == 4.0: color", "= 12 #for p_idx in range(0,len(pilots)): for p_idx in range(0,minRows): row_idx = p_idx+1", "airframe = 'A-4E-C' elif str(sys.argv[1]) == 'harrier': airframe = 'AV8BNA' elif str(sys.argv[1]) ==", "== 
5: color = greencolor elif g == 5.5: color = bluegraycolor else:", "== airframe: if i['airframe'] in airframe: data2.append(i) # print('Deleting airframe: ', i['airframe'], '", "'' squadron = '' ruleset = 'best' #print('Length of argv: ' , len(sys.argv));", "the epoch:\", modification_time) except OSError: print(\"Path '%s' does not exists or is inaccessible\"", "cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') # titlestr = 'JOW Greenie Board ' +", "data2.append(i) data = data2 print('Number remaining: ', str(len(data))) pilots = [] pilotRows =", "the landings not in the current month data2 = [] if squadron ==", ", squadron) for i in data: name = i['pilot'] #print('Name: ' , name)", "sheet.get_all_records() with open('data.txt', 'w') as outfile: json.dump(list_of_hashes, outfile) except: print('Exception thrown in updateFromGoogle')", "= tb.add_cell(row_idx,f,width,height,text=text,loc='center',facecolor=color) #edgecolor='none' cell.set_edgecolor(edgecolor) #tb.set_fontsize(7) ax.add_table(tb) ax.set_axis_off() ax.axis('off') plt.box(False) ax.get_xaxis().set_ticks([]) ax.get_yaxis().set_ticks([]) #plt.title(titlestr,color='w') plt.savefig('board.png',transparent=False,bbox_inches='tight',", "count + 1; #print('Keeping in squadron: ' , name) # name = name.replace(squadron,'')", "name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,2*width,height,text=scoreText,loc='center',facecolor=blankcell) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.4\")) cell.set_edgecolor(edgecolor) col_idx = 2 for", "= str(sys.argv[2]) if len(sys.argv) >= 4: squadron = str(sys.argv[3]); print('Squadron: ', squadron) print('Ruleset:", "i['grade']: continue if not i['wire']: #print('Empty.') pts.append(i['points']) else: #print('not empty') if not i['finalscore']:", "' , 
len(sys.argv)); if len(sys.argv) >= 2: if str(sys.argv[1]) == 'turkey': airframe =", "options): maxLength = 0 for i in pilotRows: if len(i) > maxLength: maxLength", "colorFromPoints(g): bluegraycolor = '#708286' glossgraycolor = '#5F615E' redcolor = '#ED1B24' browncolor = '#835C3B'", "try: scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive'] creds = ServiceAccountCredentials.from_json_keyfile_name('HypeManLSO-358d4493fc1d.json', scope) client = gspread.authorize(creds) sheet =", "0 for col_idx in range(2,maxLength+2): text = '' if count < len(titlestr): text", "3: ruleset = str(sys.argv[2]) if len(sys.argv) >= 4: squadron = str(sys.argv[3]); print('Squadron: ',", "imonth = int(idate[1]) if imonth == currentMonthSQ: data2.append(i) data = data2 for i", "colorFromPoints(min(pts)) return gradeCell def calculateGrade(curList, ruleset): if ruleset == 'best': return calculateGradeCivilian(curList) if", "elif '3' in g['icon']: text = case3 elif '5' in g['icon']: text =", "currentMonthSQ: data2.append(i) data = data2 for i in reversed(data): name = i['pilot'] if", "#print(name) boardRow = []; uniqueDates = [] for i in reversed(data): #grade =", "n_cols = maxLength+2 n_rows = len(pilots)+1 width, height = 100 / n_cols, 100.0", "= tb.add_cell(0,0,8*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) cell = tb.add_cell(0,1,2*width,height,text='',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_edgecolor(edgecolor)", "2 for g in rd: color = g['bg'] text = '' if '5.5'", "= tb.add_cell(row_idx,0,8*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.5\")) 
cell.set_edgecolor(edgecolor) # name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,2*width,height,text=scoreText,loc='center',facecolor=blankcell)", "cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') #titlestr = 'JOW Greenie Board ' + minDate + ' to", "CalculateAverageScore(rd) scoreText = round(avg,1) cell = tb.add_cell(row_idx,0,10*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) #", "= '○' #case2 = '∘' #unicorn='✈️' blankcell='#FFFFFF' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718'] # redcolor", "= i['pilot'] #print('Name: ' , name) name = name.replace('-', '') name = name.replace('_',", "if '5.5' in g['icon']: text = shithot elif '3' in g['icon'] and '5'", "= ' '+options['squadron'] count = 0 for col_idx in range(2,options['maxCols']+2): text = ''", "= '#835C3B' orangecolor = '#d17a00' yellowcolor = '#b6c700' greencolor = '#0bab35' # try:", "case3 = '•' case3= '◉' case2 = '⊙' case2 = '○' #case2 =", "tb.add_cell(row_idx,0,10*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7\")) cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) # name = pilots[p_idx]; cell = tb.add_cell(row_idx,1,width,height,text=scoreText,loc='center',facecolor=blankcell)", "height, text=text, loc='center', facecolor=blankcell) cell.set_edgecolor(edgecolor) cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_text_props(family='') # titlestr = 'JOW", "< 
options['maxRows']: maxLength = options['maxRows'] fig = plt.figure(figsize=(6, 3), dpi=250) ax = fig.add_subplot(1,1,1)", "frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax, bbox=[0, 0, 1, 1]) tb.auto_set_font_size(False) n_cols = maxLength+2", "'') name = name.replace('|', '') name = name.replace('\\\\', '') name = name.replace('/', '')", "pilotRows: if len(i) > maxLength: maxLength = len(i) if maxLength < options['maxRows']: maxLength", "count + 1 #print(' Calculate grade iteration: ', count) # skip WOFDS if", "#case2 = '○' #case2 = '∘' #unicorn='✈️' blankcell='#1A392A' #colors=['red','orange','orange','yellow','lightgreen'] #078a21 #colors=['#a00000','#835C3B','#d17a00','#b6c700','#0bab35','#057718','#057718'] colors=['#a00000','#d17a00','#d17a00','#b6c700','#0bab35','#057718','#057718', '#708286','#5F615E']", "tmp == 5 and not '5' in gradeCell['icon']: gradeCell['icon']+= '5' except: pt=0 gradeCell['bg']", "in gradeCell['icon']: gradeCell['icon'] += '2' try: tmp = float(i['points']) if tmp > pt:", "= '' if '5.5' in g['icon']: text = shithot elif '3' in g['icon']", "maxDate = '' textcolor = '#000000' edgecolor = '#708090' cell = tb.add_cell(0,0,10*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none'", "+= '2' try: tmp = float(i['points']) if tmp > pt: pt = tmp", "100 / n_cols, 100.0 / n_rows shithot ='🎖️' anchor='⚓' goldstar = '⭐' goldstar", "= goldstar elif '3' in g['icon']: text = case3 elif '5' in g['icon']:", "skip WOFDS if 'WOFD' in i['grade']: continue if not i['wire']: #print('Empty.') pts.append(i['points']) else:", "grade0['color']='white'; grade0['score']=0.0; grade0['symbol']='x'; grade0['grade']='--' # if squadron is empty then lets trim the", "orangecolor = '#d17a00' yellowcolor = '#b6c700' greencolor = '#0bab35' # try: # minDate", "name) name = name.replace('-', '') name = name.replace('_', '') name = name.replace('[', '')", "name.replace(squadron,'') # if the squadron was empty 
just keep the original data data", "i in reversed(data): name = i['pilot'] if name not in pilots: pilots.append(name) pilotRow", "cell.set_edgecolor(edgecolor) cell.set_linewidth(0.5) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24) currentMonthSQ = datetime.now().month titlestr = ' '+options['squadron'] count", "not in uniqueDates: uniqueDates.append(i['ServerDate']) for i in uniqueDates: #print(i) curList = []; for", "# boardRow.append(grade) # curList = []; # curList.append(i) #print(boardRow) return boardRow def CalculateAverageScore(pilotRow):", "name = \"SippyCup\" cell = tb.add_cell(row_idx,0,8*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=\"7.5\")) cell.set_edgecolor(edgecolor) # name =", "= plt.figure(dpi=150) ax = fig.add_subplot(1,1,1) frame1 = plt.gca() frame1.axes.get_xaxis().set_ticks([]) frame1.axes.get_yaxis().set_ticks([]) tb = Table(ax,", "g == 2.5: color=bluecolor elif g == 3.0: color = yellowcolor elif g", "reversed(data): name = i['pilot'] if name not in pilots: pilots.append(name) pilotRow = calculatePilotRow(data,", "= '#FFFFF0' edgecolor = '#708090' cell = tb.add_cell(0,0,8*width,height,text='Callsign',loc='center',facecolor=blankcell) #edgecolor='none' cell.get_text().set_color(textcolor) cell.set_text_props(fontproperties=FontProperties(weight='bold',size=8)) cell.set_edgecolor(edgecolor) #cell.set_fontsize(24)", "refresh, skipping pull from google.') def updateFromGoogle(): try: scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive'] creds =", "< len(titlestr): text = titlestr[count] count = count + 1 cell = tb.add_cell(0,", "of data array: ' , str(len(data))) count = 0 if airframe != '':", "round(avg,1) if name.lower() == 'eese': name = \"SippyCup\" cell = 
tb.add_cell(row_idx,0,8*width,height,text=name,loc='center',facecolor=blankcell,edgecolor='blue') #edgecolor='none' cell.get_text().set_color(textcolor)", "== 'best': return calculateGradeCivilian(curList) if ruleset == 'first': return calculateGradeTailhooker(curList) def calculatePilotRow(data, name,", "find their FIRST wire gradeCell = {} gradeCell['score'] = 1 gradeCell['icon'] = ''", "p_idx+1 rd = [] name = '' scoreText = '' if p_idx <", "boardRow.append(ithPilotGrade) # if not haveDate: # curDate = i['ServerDate'] # haveDate = True", "= '#708286' glossgraycolor = '#5F615E' browncolor = '#835C3B' orangecolor = '#d17a00' yellowcolor =", "options['airframe'] = airframe options['squadron'] = squadron options['ruleset'] = ruleset options['maxRows']=10 options['maxCols']=17 plotSquadron(pilotRows, options)", "if len(sys.argv) >= 4: squadron = str(sys.argv[3]); print('Squadron: ', squadron) print('Ruleset: ', ruleset)", "browncolor = '#835C3B' orangecolor = '#d17a00' yellowcolor = '#b6c700' greencolor = '#0bab35' bluecolor", "i['case'] == 3: gradeCell['icon']+='3' return gradeCell if len(pts) == 0: gradeCell['score'] = 1", "epoch:\", modification_time) except OSError: print(\"Path '%s' does not exists or is inaccessible\" %path)", "if not i['finalscore']: gradeCell['score'] = i['points'] else: gradeCell['score'] = i['finalscore'] pts.append(i['points']) gradeCell['bg'] =", "= col_idx + 1 color = blankcell text='' # add the remaining cells", ", str(len(data))) count = 0 if airframe != '': data2 = [] print('Keeping", "glossgraycolor = '#5F615E' browncolor = '#835C3B' orangecolor = '#d17a00' yellowcolor = '#b6c700' greencolor" ]
[ "import sys sys.path.append(\"../../models/research\") from PIL import Image from cloudmesh.secchi.tensorflow.utils_tf import dataset_util from collections", "image.size filename = group.filename.encode('utf8') image_format = b'jpg' xmins = [] xmaxs = []", "for filename, x in zip(gb.groups.keys(), gb.groups)] def create_tf_example(self, group, path): with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.filename)),", "self.img_path = os.path.join(path_expand('~/.cloudmesh/secchi/images'), str) self.output_path = os.path.join(path_expand('~/.cloudmesh/secchi/annotations'), f\"{str}.record\") self.label = 'disc' def create(self):", "{}'.format(self.output_path)) def class_text_to_int(self, row_label): if row_label == self.label: # 'ship': return 1 else:", "def split(self, df, group): data = namedtuple('data', ['filename', 'object']) gb = df.groupby(group) return", "xmaxs.append(row['xmax'] / width) ymins.append(row['ymin'] / height) ymaxs.append(row['ymax'] / height) classes_text.append(row['class'].encode('utf8')) classes.append(self.class_text_to_int(row['class'])) tf_example =", "x in zip(gb.groups.keys(), gb.groups)] def create_tf_example(self, group, path): with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as", "fid.read() encoded_jpg_io = io.BytesIO(encoded_jpg) image = Image.open(encoded_jpg_io) width, height = image.size filename =", "collections import namedtuple, OrderedDict from cloudmesh.common.util import path_expand class GenTF: def __init__(self, str):", "writer.close() print('Successfully created the TFRecords: {}'.format(self.output_path)) def class_text_to_int(self, row_label): if row_label == self.label:", "= b'jpg' xmins = [] xmaxs = [] ymins = [] ymaxs =", "from collections import namedtuple, OrderedDict from cloudmesh.common.util import path_expand class GenTF: def __init__(self,", "/ width) xmaxs.append(row['xmax'] / width) ymins.append(row['ymin'] / height) ymaxs.append(row['ymax'] / height) 
class GenTF:
    """Convert per-image CSV annotations into a TensorFlow TFRecord file.

    The input CSV (produced by the labelling step) has one row per bounding
    box with columns: filename, width, height, class, xmin, ymin, xmax, ymax.
    Rows are grouped by image filename and each image becomes one
    ``tf.train.Example`` in the output ``.record`` file.
    """

    def __init__(self, str):
        # NOTE: the parameter name shadows the builtin `str`; it is kept
        # unchanged for backward compatibility with existing callers.
        # Expected values are 'train' or 'test' — any value other than
        # 'train' selects the test CSV.
        if str == 'train':
            self.csv_input = os.path.join(path_expand('~/.cloudmesh/secchi/annotations'), 'train_labels.csv')
        else:
            self.csv_input = os.path.join(path_expand('~/.cloudmesh/secchi/annotations'), 'test_labels.csv')
        # Images for this split live in a subdirectory named after it.
        self.img_path = os.path.join(path_expand('~/.cloudmesh/secchi/images'), str)
        self.output_path = os.path.join(path_expand('~/.cloudmesh/secchi/annotations'), f"{str}.record")
        # Single-class detector: only this label maps to a class id.
        self.label = 'disc'

    def create(self):
        """Read the CSV, group rows per image, and write the TFRecord file."""
        print("csv_input: ", self.csv_input)
        examples = pd.read_csv(self.csv_input)
        grouped = self.split(examples, 'filename')
        # Fixed: the original left the writer unclosed if an image failed to
        # load part-way through; the context manager guarantees flush/close.
        with tf.io.TFRecordWriter(self.output_path) as writer:
            for group in grouped:
                tf_example = self.create_tf_example(group, self.img_path)
                writer.write(tf_example.SerializeToString())
        print('Successfully created the TFRecords: {}'.format(self.output_path))

    def class_text_to_int(self, row_label):
        """Return the integer class id (1) for the known label, else None."""
        if row_label == self.label:  # 'ship':
            return 1
        # Fixed: the original ended with a bare `None` expression statement
        # and relied on Python's implicit-None return; make it explicit.
        return None

    def split(self, df, group):
        """Group the annotation DataFrame by `group` (e.g. 'filename').

        Returns a list of ``data(filename, object)`` namedtuples where
        ``object`` is the sub-DataFrame of all boxes for that image.
        """
        data = namedtuple('data', ['filename', 'object'])
        gb = df.groupby(group)
        # Fixed: iterating `gb.groups` yields the group keys directly, so the
        # original zip(gb.groups.keys(), gb.groups) paired each key with
        # itself — one iteration over the keys is sufficient and equivalent.
        return [data(filename, gb.get_group(filename)) for filename in gb.groups]

    def create_tf_example(self, group, path):
        """Build a tf.train.Example for one image and its bounding boxes.

        Box coordinates are normalised to [0, 1] using the actual image size
        read from the JPEG itself (not the width/height columns of the CSV).
        """
        with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
            encoded_jpg = fid.read()
        encoded_jpg_io = io.BytesIO(encoded_jpg)
        image = Image.open(encoded_jpg_io)
        width, height = image.size
        filename = group.filename.encode('utf8')
        image_format = b'jpg'
        xmins = []
        xmaxs = []
        ymins = []
        ymaxs = []
        classes_text = []
        classes = []
        for index, row in group.object.iterrows():
            xmins.append(row['xmin'] / width)
            xmaxs.append(row['xmax'] / width)
            ymins.append(row['ymin'] / height)
            ymaxs.append(row['ymax'] / height)
            classes_text.append(row['class'].encode('utf8'))
            # NOTE(review): an unknown label yields None here, which would
            # make int64_list_feature fail below — confirm every CSV label
            # matches self.label.
            classes.append(self.class_text_to_int(row['class']))
        tf_example = tf.train.Example(features=tf.train.Features(feature={
            'image/height': dataset_util.int64_feature(height),
            'image/width': dataset_util.int64_feature(width),
            'image/filename': dataset_util.bytes_feature(filename),
            'image/source_id': dataset_util.bytes_feature(filename),
            'image/encoded': dataset_util.bytes_feature(encoded_jpg),
            'image/format': dataset_util.bytes_feature(image_format),
            'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
            'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
            'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
            'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
            'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
            'image/object/class/label': dataset_util.int64_list_feature(classes),
        }))
        return tf_example
[ "Y, 'x') ax1.plot(pX, m) ax1.plot(sp.inducing_variable.Z.value(), ipm, 'o', color='C3') deviation = (2 * (v", "m.numpy().flatten() - deviation, m.numpy().flatten() + deviation, alpha=0.3) ax1.axvline(pX[np.argmax(v)].item(), color='C2') ax1.set_ylabel(\"y\") ax2.plot(pX, v **", "X + 0.3) + np.cos(17 * X) * 1.2 + np.random.randn(*X.shape) * 0.1", "+ 1.2 * np.sin(8 * X + 0.3) + np.cos(17 * X) *", "deviation, m.numpy().flatten() + deviation, alpha=0.3) ax1.axvline(pX[np.argmax(v)].item(), color='C2') ax1.set_ylabel(\"y\") ax2.plot(pX, v ** 0.5) ax2.plot(sp.inducing_variable.Z.value(),", "gpflow.utilities.read_values(gpr.kernel)) Z_initer = ConditionalVariance() sp = gpflow.models.SGPR((X, Y), k, Z_initer.compute_initialisation(X, 6, k)[0]) gpflow.utilities.multiple_assign(sp,", "Y), k, Z_initer.compute_initialisation(X, 6, k)[0]) gpflow.utilities.multiple_assign(sp, gpflow.utilities.read_values(gpr)) pX = np.linspace(0, 1, 3000)[:, None]", "ConditionalVariance() sp = gpflow.models.SGPR((X, Y), k, Z_initer.compute_initialisation(X, 6, k)[0]) gpflow.utilities.multiple_assign(sp, gpflow.utilities.read_values(gpr)) pX =", "plt.subplots(2, 1) ax1.plot(X, Y, 'x') ax1.plot(pX, m) ax1.plot(sp.inducing_variable.Z.value(), ipm, 'o', color='C3') deviation =", "Z_initer = ConditionalVariance() sp = gpflow.models.SGPR((X, Y), k, Z_initer.compute_initialisation(X, 6, k)[0]) gpflow.utilities.multiple_assign(sp, gpflow.utilities.read_values(gpr))", "opt = gpflow.optimizers.Scipy() opt_logs = opt.minimize(gpr.training_loss, gpr.trainable_variables, options=dict(maxiter=100)) k = gpflow.kernels.SquaredExponential() gpflow.utilities.multiple_assign(k, gpflow.utilities.read_values(gpr.kernel))", "gpflow.utilities.read_values(gpr)) pX = np.linspace(0, 1, 3000)[:, None] m, v = sp.predict_f(pX) ipm, _", "gpflow.utilities.multiple_assign(k, gpflow.utilities.read_values(gpr.kernel)) Z_initer = ConditionalVariance() sp = gpflow.models.SGPR((X, Y), k, Z_initer.compute_initialisation(X, 6, 
k)[0])", "ax2) = plt.subplots(2, 1) ax1.plot(X, Y, 'x') ax1.plot(pX, m) ax1.plot(sp.inducing_variable.Z.value(), ipm, 'o', color='C3')", "** 0.5) ax2.plot(sp.inducing_variable.Z.value(), sp.inducing_variable.Z.value() * 0.0, 'o', color='C3') ax2.axvline(pX[np.argmax(v)].item(), color='C2') ax2.set_xlabel(\"input $x$\") ax2.set_ylabel(\"$\\mathbb{V}\\,[p(f(x)", "+ 0.3) + np.cos(17 * X) * 1.2 + np.random.randn(*X.shape) * 0.1 gpr", "= (2 * (v + sp.likelihood.variance.value()) ** 0.5).numpy().flatten() ax1.fill_between(pX.flatten(), m.numpy().flatten() - deviation, m.numpy().flatten()", "color='C3') deviation = (2 * (v + sp.likelihood.variance.value()) ** 0.5).numpy().flatten() ax1.fill_between(pX.flatten(), m.numpy().flatten() -", "gpflow.kernels.SquaredExponential()) opt = gpflow.optimizers.Scipy() opt_logs = opt.minimize(gpr.training_loss, gpr.trainable_variables, options=dict(maxiter=100)) k = gpflow.kernels.SquaredExponential() gpflow.utilities.multiple_assign(k,", "ax1.plot(X, Y, 'x') ax1.plot(pX, m) ax1.plot(sp.inducing_variable.Z.value(), ipm, 'o', color='C3') deviation = (2 *", "opt.minimize(gpr.training_loss, gpr.trainable_variables, options=dict(maxiter=100)) k = gpflow.kernels.SquaredExponential() gpflow.utilities.multiple_assign(k, gpflow.utilities.read_values(gpr.kernel)) Z_initer = ConditionalVariance() sp =", "= gpflow.kernels.SquaredExponential() gpflow.utilities.multiple_assign(k, gpflow.utilities.read_values(gpr.kernel)) Z_initer = ConditionalVariance() sp = gpflow.models.SGPR((X, Y), k, Z_initer.compute_initialisation(X,", "+ np.cos(17 * X) * 1.2 + np.random.randn(*X.shape) * 0.1 gpr = gpflow.models.GPR((X,", "v = sp.predict_f(pX) ipm, _ = sp.predict_f(sp.inducing_variable.Z.value()) fig, (ax1, ax2) = plt.subplots(2, 1)", "np from robustgp import ConditionalVariance X = np.random.rand(150, 1) Y = 0.8 *", "options=dict(maxiter=100)) k = gpflow.kernels.SquaredExponential() gpflow.utilities.multiple_assign(k, gpflow.utilities.read_values(gpr.kernel)) 
Z_initer = ConditionalVariance() sp = gpflow.models.SGPR((X, Y),", "0.8 * np.cos(10 * X) + 1.2 * np.sin(8 * X + 0.3)", "gpr = gpflow.models.GPR((X, Y), gpflow.kernels.SquaredExponential()) opt = gpflow.optimizers.Scipy() opt_logs = opt.minimize(gpr.training_loss, gpr.trainable_variables, options=dict(maxiter=100))", "color='C2') ax1.set_ylabel(\"y\") ax2.plot(pX, v ** 0.5) ax2.plot(sp.inducing_variable.Z.value(), sp.inducing_variable.Z.value() * 0.0, 'o', color='C3') ax2.axvline(pX[np.argmax(v)].item(),", "** 0.5).numpy().flatten() ax1.fill_between(pX.flatten(), m.numpy().flatten() - deviation, m.numpy().flatten() + deviation, alpha=0.3) ax1.axvline(pX[np.argmax(v)].item(), color='C2') ax1.set_ylabel(\"y\")", "robustgp import ConditionalVariance X = np.random.rand(150, 1) Y = 0.8 * np.cos(10 *", "1.2 * np.sin(8 * X + 0.3) + np.cos(17 * X) * 1.2", "1) ax1.plot(X, Y, 'x') ax1.plot(pX, m) ax1.plot(sp.inducing_variable.Z.value(), ipm, 'o', color='C3') deviation = (2", "1.2 + np.random.randn(*X.shape) * 0.1 gpr = gpflow.models.GPR((X, Y), gpflow.kernels.SquaredExponential()) opt = gpflow.optimizers.Scipy()", "fig, (ax1, ax2) = plt.subplots(2, 1) ax1.plot(X, Y, 'x') ax1.plot(pX, m) ax1.plot(sp.inducing_variable.Z.value(), ipm,", "0.3) + np.cos(17 * X) * 1.2 + np.random.randn(*X.shape) * 0.1 gpr =", "ipm, _ = sp.predict_f(sp.inducing_variable.Z.value()) fig, (ax1, ax2) = plt.subplots(2, 1) ax1.plot(X, Y, 'x')", "Z_initer.compute_initialisation(X, 6, k)[0]) gpflow.utilities.multiple_assign(sp, gpflow.utilities.read_values(gpr)) pX = np.linspace(0, 1, 3000)[:, None] m, v", "0.5).numpy().flatten() ax1.fill_between(pX.flatten(), m.numpy().flatten() - deviation, m.numpy().flatten() + deviation, alpha=0.3) ax1.axvline(pX[np.argmax(v)].item(), color='C2') ax1.set_ylabel(\"y\") ax2.plot(pX,", "m) ax1.plot(sp.inducing_variable.Z.value(), ipm, 'o', color='C3') deviation = (2 * (v + sp.likelihood.variance.value()) **", "alpha=0.3) ax1.axvline(pX[np.argmax(v)].item(), color='C2') 
ax1.set_ylabel(\"y\") ax2.plot(pX, v ** 0.5) ax2.plot(sp.inducing_variable.Z.value(), sp.inducing_variable.Z.value() * 0.0, 'o',", "= 0.8 * np.cos(10 * X) + 1.2 * np.sin(8 * X +", "v ** 0.5) ax2.plot(sp.inducing_variable.Z.value(), sp.inducing_variable.Z.value() * 0.0, 'o', color='C3') ax2.axvline(pX[np.argmax(v)].item(), color='C2') ax2.set_xlabel(\"input $x$\")", "plt import numpy as np from robustgp import ConditionalVariance X = np.random.rand(150, 1)", "sp = gpflow.models.SGPR((X, Y), k, Z_initer.compute_initialisation(X, 6, k)[0]) gpflow.utilities.multiple_assign(sp, gpflow.utilities.read_values(gpr)) pX = np.linspace(0,", "ax1.plot(pX, m) ax1.plot(sp.inducing_variable.Z.value(), ipm, 'o', color='C3') deviation = (2 * (v + sp.likelihood.variance.value())", "np.linspace(0, 1, 3000)[:, None] m, v = sp.predict_f(pX) ipm, _ = sp.predict_f(sp.inducing_variable.Z.value()) fig,", "as plt import numpy as np from robustgp import ConditionalVariance X = np.random.rand(150,", "k, Z_initer.compute_initialisation(X, 6, k)[0]) gpflow.utilities.multiple_assign(sp, gpflow.utilities.read_values(gpr)) pX = np.linspace(0, 1, 3000)[:, None] m,", "gpr.trainable_variables, options=dict(maxiter=100)) k = gpflow.kernels.SquaredExponential() gpflow.utilities.multiple_assign(k, gpflow.utilities.read_values(gpr.kernel)) Z_initer = ConditionalVariance() sp = gpflow.models.SGPR((X,", "deviation, alpha=0.3) ax1.axvline(pX[np.argmax(v)].item(), color='C2') ax1.set_ylabel(\"y\") ax2.plot(pX, v ** 0.5) ax2.plot(sp.inducing_variable.Z.value(), sp.inducing_variable.Z.value() * 0.0,", "_ = sp.predict_f(sp.inducing_variable.Z.value()) fig, (ax1, ax2) = plt.subplots(2, 1) ax1.plot(X, Y, 'x') ax1.plot(pX,", "np.sin(8 * X + 0.3) + np.cos(17 * X) * 1.2 + np.random.randn(*X.shape)", "None] m, v = sp.predict_f(pX) ipm, _ = sp.predict_f(sp.inducing_variable.Z.value()) fig, (ax1, ax2) =", "np.cos(17 * X) * 1.2 + np.random.randn(*X.shape) * 0.1 gpr = gpflow.models.GPR((X, Y),", "import 
ConditionalVariance X = np.random.rand(150, 1) Y = 0.8 * np.cos(10 * X)", "= gpflow.models.SGPR((X, Y), k, Z_initer.compute_initialisation(X, 6, k)[0]) gpflow.utilities.multiple_assign(sp, gpflow.utilities.read_values(gpr)) pX = np.linspace(0, 1,", "ax1.axvline(pX[np.argmax(v)].item(), color='C2') ax1.set_ylabel(\"y\") ax2.plot(pX, v ** 0.5) ax2.plot(sp.inducing_variable.Z.value(), sp.inducing_variable.Z.value() * 0.0, 'o', color='C3')", "ax2.plot(sp.inducing_variable.Z.value(), sp.inducing_variable.Z.value() * 0.0, 'o', color='C3') ax2.axvline(pX[np.argmax(v)].item(), color='C2') ax2.set_xlabel(\"input $x$\") ax2.set_ylabel(\"$\\mathbb{V}\\,[p(f(x) | \\mathbf{u}]^{0.5}$\")", "+ np.random.randn(*X.shape) * 0.1 gpr = gpflow.models.GPR((X, Y), gpflow.kernels.SquaredExponential()) opt = gpflow.optimizers.Scipy() opt_logs", "* X) + 1.2 * np.sin(8 * X + 0.3) + np.cos(17 *", "'o', color='C3') deviation = (2 * (v + sp.likelihood.variance.value()) ** 0.5).numpy().flatten() ax1.fill_between(pX.flatten(), m.numpy().flatten()", "ax1.set_ylabel(\"y\") ax2.plot(pX, v ** 0.5) ax2.plot(sp.inducing_variable.Z.value(), sp.inducing_variable.Z.value() * 0.0, 'o', color='C3') ax2.axvline(pX[np.argmax(v)].item(), color='C2')", "- deviation, m.numpy().flatten() + deviation, alpha=0.3) ax1.axvline(pX[np.argmax(v)].item(), color='C2') ax1.set_ylabel(\"y\") ax2.plot(pX, v ** 0.5)", "gpflow.kernels.SquaredExponential() gpflow.utilities.multiple_assign(k, gpflow.utilities.read_values(gpr.kernel)) Z_initer = ConditionalVariance() sp = gpflow.models.SGPR((X, Y), k, Z_initer.compute_initialisation(X, 6,", "numpy as np from robustgp import ConditionalVariance X = np.random.rand(150, 1) Y =", "np.random.rand(150, 1) Y = 0.8 * np.cos(10 * X) + 1.2 * np.sin(8", "import gpflow import matplotlib.pyplot as plt import numpy as np from robustgp import", "1, 3000)[:, None] m, v = sp.predict_f(pX) ipm, _ = sp.predict_f(sp.inducing_variable.Z.value()) fig, (ax1,", "= np.linspace(0, 1, 3000)[:, None] m, v = 
sp.predict_f(pX) ipm, _ = sp.predict_f(sp.inducing_variable.Z.value())", "* X + 0.3) + np.cos(17 * X) * 1.2 + np.random.randn(*X.shape) *", "k)[0]) gpflow.utilities.multiple_assign(sp, gpflow.utilities.read_values(gpr)) pX = np.linspace(0, 1, 3000)[:, None] m, v = sp.predict_f(pX)", "as np from robustgp import ConditionalVariance X = np.random.rand(150, 1) Y = 0.8", "m, v = sp.predict_f(pX) ipm, _ = sp.predict_f(sp.inducing_variable.Z.value()) fig, (ax1, ax2) = plt.subplots(2,", "sp.predict_f(sp.inducing_variable.Z.value()) fig, (ax1, ax2) = plt.subplots(2, 1) ax1.plot(X, Y, 'x') ax1.plot(pX, m) ax1.plot(sp.inducing_variable.Z.value(),", "* np.sin(8 * X + 0.3) + np.cos(17 * X) * 1.2 +", "np.cos(10 * X) + 1.2 * np.sin(8 * X + 0.3) + np.cos(17", "+ deviation, alpha=0.3) ax1.axvline(pX[np.argmax(v)].item(), color='C2') ax1.set_ylabel(\"y\") ax2.plot(pX, v ** 0.5) ax2.plot(sp.inducing_variable.Z.value(), sp.inducing_variable.Z.value() *", "gpflow.models.GPR((X, Y), gpflow.kernels.SquaredExponential()) opt = gpflow.optimizers.Scipy() opt_logs = opt.minimize(gpr.training_loss, gpr.trainable_variables, options=dict(maxiter=100)) k =", "= sp.predict_f(sp.inducing_variable.Z.value()) fig, (ax1, ax2) = plt.subplots(2, 1) ax1.plot(X, Y, 'x') ax1.plot(pX, m)", "import matplotlib.pyplot as plt import numpy as np from robustgp import ConditionalVariance X", "sp.likelihood.variance.value()) ** 0.5).numpy().flatten() ax1.fill_between(pX.flatten(), m.numpy().flatten() - deviation, m.numpy().flatten() + deviation, alpha=0.3) ax1.axvline(pX[np.argmax(v)].item(), color='C2')", "ax1.plot(sp.inducing_variable.Z.value(), ipm, 'o', color='C3') deviation = (2 * (v + sp.likelihood.variance.value()) ** 0.5).numpy().flatten()", "X = np.random.rand(150, 1) Y = 0.8 * np.cos(10 * X) + 1.2", "opt_logs = opt.minimize(gpr.training_loss, gpr.trainable_variables, options=dict(maxiter=100)) k = gpflow.kernels.SquaredExponential() gpflow.utilities.multiple_assign(k, 
gpflow.utilities.read_values(gpr.kernel)) Z_initer = ConditionalVariance()", "'x') ax1.plot(pX, m) ax1.plot(sp.inducing_variable.Z.value(), ipm, 'o', color='C3') deviation = (2 * (v +", "pX = np.linspace(0, 1, 3000)[:, None] m, v = sp.predict_f(pX) ipm, _ =", "= gpflow.models.GPR((X, Y), gpflow.kernels.SquaredExponential()) opt = gpflow.optimizers.Scipy() opt_logs = opt.minimize(gpr.training_loss, gpr.trainable_variables, options=dict(maxiter=100)) k", "* 1.2 + np.random.randn(*X.shape) * 0.1 gpr = gpflow.models.GPR((X, Y), gpflow.kernels.SquaredExponential()) opt =", "+ sp.likelihood.variance.value()) ** 0.5).numpy().flatten() ax1.fill_between(pX.flatten(), m.numpy().flatten() - deviation, m.numpy().flatten() + deviation, alpha=0.3) ax1.axvline(pX[np.argmax(v)].item(),", "m.numpy().flatten() + deviation, alpha=0.3) ax1.axvline(pX[np.argmax(v)].item(), color='C2') ax1.set_ylabel(\"y\") ax2.plot(pX, v ** 0.5) ax2.plot(sp.inducing_variable.Z.value(), sp.inducing_variable.Z.value()", "import numpy as np from robustgp import ConditionalVariance X = np.random.rand(150, 1) Y", "matplotlib.pyplot as plt import numpy as np from robustgp import ConditionalVariance X =", "gpflow.utilities.multiple_assign(sp, gpflow.utilities.read_values(gpr)) pX = np.linspace(0, 1, 3000)[:, None] m, v = sp.predict_f(pX) ipm,", "ax2.plot(pX, v ** 0.5) ax2.plot(sp.inducing_variable.Z.value(), sp.inducing_variable.Z.value() * 0.0, 'o', color='C3') ax2.axvline(pX[np.argmax(v)].item(), color='C2') ax2.set_xlabel(\"input", "= plt.subplots(2, 1) ax1.plot(X, Y, 'x') ax1.plot(pX, m) ax1.plot(sp.inducing_variable.Z.value(), ipm, 'o', color='C3') deviation", "= gpflow.optimizers.Scipy() opt_logs = opt.minimize(gpr.training_loss, gpr.trainable_variables, options=dict(maxiter=100)) k = gpflow.kernels.SquaredExponential() gpflow.utilities.multiple_assign(k, gpflow.utilities.read_values(gpr.kernel)) Z_initer", "ipm, 'o', color='C3') deviation = (2 * (v + sp.likelihood.variance.value()) ** 
0.5).numpy().flatten() ax1.fill_between(pX.flatten(),", "6, k)[0]) gpflow.utilities.multiple_assign(sp, gpflow.utilities.read_values(gpr)) pX = np.linspace(0, 1, 3000)[:, None] m, v =", "Y), gpflow.kernels.SquaredExponential()) opt = gpflow.optimizers.Scipy() opt_logs = opt.minimize(gpr.training_loss, gpr.trainable_variables, options=dict(maxiter=100)) k = gpflow.kernels.SquaredExponential()", "0.1 gpr = gpflow.models.GPR((X, Y), gpflow.kernels.SquaredExponential()) opt = gpflow.optimizers.Scipy() opt_logs = opt.minimize(gpr.training_loss, gpr.trainable_variables,", "X) + 1.2 * np.sin(8 * X + 0.3) + np.cos(17 * X)", "gpflow.models.SGPR((X, Y), k, Z_initer.compute_initialisation(X, 6, k)[0]) gpflow.utilities.multiple_assign(sp, gpflow.utilities.read_values(gpr)) pX = np.linspace(0, 1, 3000)[:,", "from robustgp import ConditionalVariance X = np.random.rand(150, 1) Y = 0.8 * np.cos(10", "X) * 1.2 + np.random.randn(*X.shape) * 0.1 gpr = gpflow.models.GPR((X, Y), gpflow.kernels.SquaredExponential()) opt", "* X) * 1.2 + np.random.randn(*X.shape) * 0.1 gpr = gpflow.models.GPR((X, Y), gpflow.kernels.SquaredExponential())", "(v + sp.likelihood.variance.value()) ** 0.5).numpy().flatten() ax1.fill_between(pX.flatten(), m.numpy().flatten() - deviation, m.numpy().flatten() + deviation, alpha=0.3)", "(ax1, ax2) = plt.subplots(2, 1) ax1.plot(X, Y, 'x') ax1.plot(pX, m) ax1.plot(sp.inducing_variable.Z.value(), ipm, 'o',", "= opt.minimize(gpr.training_loss, gpr.trainable_variables, options=dict(maxiter=100)) k = gpflow.kernels.SquaredExponential() gpflow.utilities.multiple_assign(k, gpflow.utilities.read_values(gpr.kernel)) Z_initer = ConditionalVariance() sp", "np.random.randn(*X.shape) * 0.1 gpr = gpflow.models.GPR((X, Y), gpflow.kernels.SquaredExponential()) opt = gpflow.optimizers.Scipy() opt_logs =", "* 0.1 gpr = gpflow.models.GPR((X, Y), gpflow.kernels.SquaredExponential()) opt = gpflow.optimizers.Scipy() opt_logs = opt.minimize(gpr.training_loss,", "sp.predict_f(pX) ipm, _ = 
sp.predict_f(sp.inducing_variable.Z.value()) fig, (ax1, ax2) = plt.subplots(2, 1) ax1.plot(X, Y,", "= np.random.rand(150, 1) Y = 0.8 * np.cos(10 * X) + 1.2 *", "gpflow import matplotlib.pyplot as plt import numpy as np from robustgp import ConditionalVariance", "(2 * (v + sp.likelihood.variance.value()) ** 0.5).numpy().flatten() ax1.fill_between(pX.flatten(), m.numpy().flatten() - deviation, m.numpy().flatten() +", "ax1.fill_between(pX.flatten(), m.numpy().flatten() - deviation, m.numpy().flatten() + deviation, alpha=0.3) ax1.axvline(pX[np.argmax(v)].item(), color='C2') ax1.set_ylabel(\"y\") ax2.plot(pX, v", "sp.inducing_variable.Z.value() * 0.0, 'o', color='C3') ax2.axvline(pX[np.argmax(v)].item(), color='C2') ax2.set_xlabel(\"input $x$\") ax2.set_ylabel(\"$\\mathbb{V}\\,[p(f(x) | \\mathbf{u}]^{0.5}$\") plt.show()", "gpflow.optimizers.Scipy() opt_logs = opt.minimize(gpr.training_loss, gpr.trainable_variables, options=dict(maxiter=100)) k = gpflow.kernels.SquaredExponential() gpflow.utilities.multiple_assign(k, gpflow.utilities.read_values(gpr.kernel)) Z_initer =", "= ConditionalVariance() sp = gpflow.models.SGPR((X, Y), k, Z_initer.compute_initialisation(X, 6, k)[0]) gpflow.utilities.multiple_assign(sp, gpflow.utilities.read_values(gpr)) pX", "* np.cos(10 * X) + 1.2 * np.sin(8 * X + 0.3) +", "1) Y = 0.8 * np.cos(10 * X) + 1.2 * np.sin(8 *", "k = gpflow.kernels.SquaredExponential() gpflow.utilities.multiple_assign(k, gpflow.utilities.read_values(gpr.kernel)) Z_initer = ConditionalVariance() sp = gpflow.models.SGPR((X, Y), k,", "0.5) ax2.plot(sp.inducing_variable.Z.value(), sp.inducing_variable.Z.value() * 0.0, 'o', color='C3') ax2.axvline(pX[np.argmax(v)].item(), color='C2') ax2.set_xlabel(\"input $x$\") ax2.set_ylabel(\"$\\mathbb{V}\\,[p(f(x) |", "Y = 0.8 * np.cos(10 * X) + 1.2 * np.sin(8 * X", "* (v + sp.likelihood.variance.value()) ** 0.5).numpy().flatten() ax1.fill_between(pX.flatten(), m.numpy().flatten() - deviation, m.numpy().flatten() + deviation,", 
"deviation = (2 * (v + sp.likelihood.variance.value()) ** 0.5).numpy().flatten() ax1.fill_between(pX.flatten(), m.numpy().flatten() - deviation,", "3000)[:, None] m, v = sp.predict_f(pX) ipm, _ = sp.predict_f(sp.inducing_variable.Z.value()) fig, (ax1, ax2)", "= sp.predict_f(pX) ipm, _ = sp.predict_f(sp.inducing_variable.Z.value()) fig, (ax1, ax2) = plt.subplots(2, 1) ax1.plot(X,", "ConditionalVariance X = np.random.rand(150, 1) Y = 0.8 * np.cos(10 * X) +" ]
[ "transfer of git lfs files if github.getLargeFiles(name) == None: exit(f'Unable to get list", "import Github from src.gitlab import Gitlab def exit(message): print(message) sys.exit() if __name__ ==", "is done status = '' previousStatus = '' finishedStatus = [ 'complete', 'auth_failed',", "Gitlab def exit(message): print(message) sys.exit() if __name__ == \"__main__\": # Get all gitlab", "= str(name).replace(' ', '-') print(f'Starting import of repository: {name}') # Create repository that", "that does not exist if github.repositoryCreate(name, '') == None: print(f'Unable to create repository:", "= status if status == 'importing': # Enable transfer of git lfs files", "to Github finished with status: {status}') print(f'Import of \"{name}\" to Github finished with", "== None: exit(f'Unable to start import of \"{url}\" to github repo named \"{name}\"')", "'' finishedStatus = [ 'complete', 'auth_failed', 'error', 'detection_needs_auth', 'detection_found_nothing', 'detection_found_multiple', None ] while", "{name}') # Create repository that does not exist if github.repositoryCreate(name, '') == None:", "\"{name}\" to Github finished with status: {status}') print(f'Import of \"{name}\" to Github finished", "repository: {name}') # Create repository that does not exist if github.repositoryCreate(name, '') ==", "status if status == 'importing': # Enable transfer of git lfs files if", "= str(key).replace('-', ' ') if key in gitlab_repos.keys(): gitlab_repos.pop(key) print(f'Repository \"{key}\" already exsists", "for name, url in gitlab_repos.items(): name = str(name).replace(' ', '-') print(f'Starting import of", "if github.repositoryCreate(name, '') == None: print(f'Unable to create repository: {name}') continue # Start", "files in repo: {name}') if github.lfsPreference(name) == None: exit(f'Unable to set git lfs", "not be exported from gitlab') if alternativeKey in gitlab_repos.keys(): gitlab_repos.pop(alternativeKey) print(f'Repository \"{alternativeKey}\" already", 
"preference on: {name}') time.sleep(1) if status != 'complete': exit(f'Import of \"{name}\" to Github", "of git lfs files if github.getLargeFiles(name) == None: exit(f'Unable to get list of", "to create repository: {name}') continue # Start import to repository if github.importStart(url, name)", "print(message) sys.exit() if __name__ == \"__main__\": # Get all gitlab repositories gitlab =", "to retreive gitlab repositories') elif gitlab_repos == dict(): exit('Zero repositories was fetched from", "in finishedStatus: status = github.importStatus(name) if previousStatus != status: print(f'Status: {status}') previousStatus =", "on: {name}') time.sleep(1) if status != 'complete': exit(f'Import of \"{name}\" to Github finished", "gitlab') for name, url in gitlab_repos.items(): name = str(name).replace(' ', '-') print(f'Starting import", "sys.exit() if __name__ == \"__main__\": # Get all gitlab repositories gitlab = Gitlab()", "repositories') print ('Github repositories found: ' + str(len(github_repos))) # Skip repositories that already", "gitlab = Gitlab() gitlab_repos = gitlab.repositories() if gitlab_repos == None: exit('Not able to", "in gitlab_repos.keys(): gitlab_repos.pop(key) print(f'Repository \"{key}\" already exsists on Github and will not be", "import to repository if github.importStart(url, name) == None: exit(f'Unable to start import of", "if key in gitlab_repos.keys(): gitlab_repos.pop(key) print(f'Repository \"{key}\" already exsists on Github and will", "github repo named \"{name}\"') # Check if import is done status = ''", "on Github and will not be exported from gitlab') if alternativeKey in gitlab_repos.keys():", "'detection_needs_auth', 'detection_found_nothing', 'detection_found_multiple', None ] while status not in finishedStatus: status = github.importStatus(name)", "already exists on github for key in github_repos.keys(): alternativeKey = str(key).replace('-', ' ')", "# Create repository that does not exist if github.repositoryCreate(name, '') == 
None: print(f'Unable", "None: exit('Not able to retreive gitlab repositories') elif gitlab_repos == dict(): exit('Zero repositories", "on github for key in github_repos.keys(): alternativeKey = str(key).replace('-', ' ') if key", "None: exit(f'Unable to get list of git lfs files in repo: {name}') if", "time from src.github import Github from src.gitlab import Gitlab def exit(message): print(message) sys.exit()", "of repository: {name}') # Create repository that does not exist if github.repositoryCreate(name, '')", "' ') if key in gitlab_repos.keys(): gitlab_repos.pop(key) print(f'Repository \"{key}\" already exsists on Github", "print(f'Repository \"{key}\" already exsists on Github and will not be exported from gitlab')", "and will not be exported from gitlab') for name, url in gitlab_repos.items(): name", "repo: {name}') if github.lfsPreference(name) == None: exit(f'Unable to set git lfs preference on:", "'error', 'detection_needs_auth', 'detection_found_nothing', 'detection_found_multiple', None ] while status not in finishedStatus: status =", "previousStatus = '' finishedStatus = [ 'complete', 'auth_failed', 'error', 'detection_needs_auth', 'detection_found_nothing', 'detection_found_multiple', None", "{name}') if github.lfsPreference(name) == None: exit(f'Unable to set git lfs preference on: {name}')", "print(f'Starting import of repository: {name}') # Create repository that does not exist if", "None: exit('Not able to retreive github repositories') print ('Github repositories found: ' +", "\"{alternativeKey}\" already exsists on Github and will not be exported from gitlab') for", "gitlab') if alternativeKey in gitlab_repos.keys(): gitlab_repos.pop(alternativeKey) print(f'Repository \"{alternativeKey}\" already exsists on Github and", "{status}') previousStatus = status if status == 'importing': # Enable transfer of git", "repositories gitlab = Gitlab() gitlab_repos = gitlab.repositories() if gitlab_repos == None: exit('Not able", "= Gitlab() gitlab_repos = 
gitlab.repositories() if gitlab_repos == None: exit('Not able to retreive", "exit('Not able to retreive github repositories') print ('Github repositories found: ' + str(len(github_repos)))", "does not exist if github.repositoryCreate(name, '') == None: print(f'Unable to create repository: {name}')", "\"{name}\"') # Check if import is done status = '' previousStatus = ''", "will not be exported from gitlab') for name, url in gitlab_repos.items(): name =", "from gitlab') if alternativeKey in gitlab_repos.keys(): gitlab_repos.pop(alternativeKey) print(f'Repository \"{alternativeKey}\" already exsists on Github", "if import is done status = '' previousStatus = '' finishedStatus = [", "gitlab_repos.pop(key) print(f'Repository \"{key}\" already exsists on Github and will not be exported from", "print(f'Status: {status}') previousStatus = status if status == 'importing': # Enable transfer of", "] while status not in finishedStatus: status = github.importStatus(name) if previousStatus != status:", "continue # Start import to repository if github.importStart(url, name) == None: exit(f'Unable to", "', '-') print(f'Starting import of repository: {name}') # Create repository that does not", "\"{url}\" to github repo named \"{name}\"') # Check if import is done status", "retreive github repositories') print ('Github repositories found: ' + str(len(github_repos))) # Skip repositories", "src.github import Github from src.gitlab import Gitlab def exit(message): print(message) sys.exit() if __name__", "\"__main__\": # Get all gitlab repositories gitlab = Gitlab() gitlab_repos = gitlab.repositories() if", "\"{key}\" already exsists on Github and will not be exported from gitlab') if", "if alternativeKey in gitlab_repos.keys(): gitlab_repos.pop(alternativeKey) print(f'Repository \"{alternativeKey}\" already exsists on Github and will", "'') == None: print(f'Unable to create repository: {name}') continue # Start import to", "import of \"{url}\" to github repo named \"{name}\"') # 
Check if import is", "'auth_failed', 'error', 'detection_needs_auth', 'detection_found_nothing', 'detection_found_multiple', None ] while status not in finishedStatus: status", "from the gitlab account') print ('Gitlab repositories found: ' + str(len(gitlab_repos))) # Get", "exist if github.repositoryCreate(name, '') == None: print(f'Unable to create repository: {name}') continue #", "git lfs files in repo: {name}') if github.lfsPreference(name) == None: exit(f'Unable to set", "able to retreive github repositories') print ('Github repositories found: ' + str(len(github_repos))) #", "not be exported from gitlab') for name, url in gitlab_repos.items(): name = str(name).replace('", "gitlab repositories') elif gitlab_repos == dict(): exit('Zero repositories was fetched from the gitlab", "if previousStatus != status: print(f'Status: {status}') previousStatus = status if status == 'importing':", "Github() github_repos = github.repositories() if github_repos == None: exit('Not able to retreive github", "to start import of \"{url}\" to github repo named \"{name}\"') # Check if", "git lfs files if github.getLargeFiles(name) == None: exit(f'Unable to get list of git", "if github.getLargeFiles(name) == None: exit(f'Unable to get list of git lfs files in", "== None: print(f'Unable to create repository: {name}') continue # Start import to repository", "Github from src.gitlab import Gitlab def exit(message): print(message) sys.exit() if __name__ == \"__main__\":", "+ str(len(gitlab_repos))) # Get all github repositories github = Github() github_repos = github.repositories()", "list of git lfs files in repo: {name}') if github.lfsPreference(name) == None: exit(f'Unable", "in repo: {name}') if github.lfsPreference(name) == None: exit(f'Unable to set git lfs preference", "retreive gitlab repositories') elif gitlab_repos == dict(): exit('Zero repositories was fetched from the", "import time from src.github import Github from src.gitlab import Gitlab def exit(message): 
print(message)", "github_repos.keys(): alternativeKey = str(key).replace('-', ' ') if key in gitlab_repos.keys(): gitlab_repos.pop(key) print(f'Repository \"{key}\"", "found: ' + str(len(gitlab_repos))) # Get all github repositories github = Github() github_repos", "('Github repositories found: ' + str(len(github_repos))) # Skip repositories that already exists on", "# Get all gitlab repositories gitlab = Gitlab() gitlab_repos = gitlab.repositories() if gitlab_repos", "and will not be exported from gitlab') if alternativeKey in gitlab_repos.keys(): gitlab_repos.pop(alternativeKey) print(f'Repository", "Create repository that does not exist if github.repositoryCreate(name, '') == None: print(f'Unable to", "name) == None: exit(f'Unable to start import of \"{url}\" to github repo named", "import Gitlab def exit(message): print(message) sys.exit() if __name__ == \"__main__\": # Get all", "= '' finishedStatus = [ 'complete', 'auth_failed', 'error', 'detection_needs_auth', 'detection_found_nothing', 'detection_found_multiple', None ]", "that already exists on github for key in github_repos.keys(): alternativeKey = str(key).replace('-', '", "!= 'complete': exit(f'Import of \"{name}\" to Github finished with status: {status}') print(f'Import of", "exit(f'Unable to start import of \"{url}\" to github repo named \"{name}\"') # Check", "# Get all github repositories github = Github() github_repos = github.repositories() if github_repos", "able to retreive gitlab repositories') elif gitlab_repos == dict(): exit('Zero repositories was fetched", "'detection_found_multiple', None ] while status not in finishedStatus: status = github.importStatus(name) if previousStatus", "to github repo named \"{name}\"') # Check if import is done status =", "status = '' previousStatus = '' finishedStatus = [ 'complete', 'auth_failed', 'error', 'detection_needs_auth',", "github_repos == None: exit('Not able to retreive github repositories') print ('Github repositories found:", "__name__ == 
\"__main__\": # Get all gitlab repositories gitlab = Gitlab() gitlab_repos =", "# Skip repositories that already exists on github for key in github_repos.keys(): alternativeKey", "will not be exported from gitlab') if alternativeKey in gitlab_repos.keys(): gitlab_repos.pop(alternativeKey) print(f'Repository \"{alternativeKey}\"", "== 'importing': # Enable transfer of git lfs files if github.getLargeFiles(name) == None:", "lfs preference on: {name}') time.sleep(1) if status != 'complete': exit(f'Import of \"{name}\" to", "name, url in gitlab_repos.items(): name = str(name).replace(' ', '-') print(f'Starting import of repository:", "== None: exit(f'Unable to set git lfs preference on: {name}') time.sleep(1) if status", "from src.gitlab import Gitlab def exit(message): print(message) sys.exit() if __name__ == \"__main__\": #", "done status = '' previousStatus = '' finishedStatus = [ 'complete', 'auth_failed', 'error',", "== dict(): exit('Zero repositories was fetched from the gitlab account') print ('Gitlab repositories", "github.repositories() if github_repos == None: exit('Not able to retreive github repositories') print ('Github", "git lfs preference on: {name}') time.sleep(1) if status != 'complete': exit(f'Import of \"{name}\"", "get list of git lfs files in repo: {name}') if github.lfsPreference(name) == None:", "create repository: {name}') continue # Start import to repository if github.importStart(url, name) ==", "to repository if github.importStart(url, name) == None: exit(f'Unable to start import of \"{url}\"", "[ 'complete', 'auth_failed', 'error', 'detection_needs_auth', 'detection_found_nothing', 'detection_found_multiple', None ] while status not in", "was fetched from the gitlab account') print ('Gitlab repositories found: ' + str(len(gitlab_repos)))", "lfs files if github.getLargeFiles(name) == None: exit(f'Unable to get list of git lfs", "== None: exit(f'Unable to get list of git lfs files in repo: {name}')", "repositories was fetched from the gitlab 
account') print ('Gitlab repositories found: ' +", "Skip repositories that already exists on github for key in github_repos.keys(): alternativeKey =", "None: print(f'Unable to create repository: {name}') continue # Start import to repository if", "# Start import to repository if github.importStart(url, name) == None: exit(f'Unable to start", "if status == 'importing': # Enable transfer of git lfs files if github.getLargeFiles(name)", "github.repositoryCreate(name, '') == None: print(f'Unable to create repository: {name}') continue # Start import", "!= status: print(f'Status: {status}') previousStatus = status if status == 'importing': # Enable", "github.importStart(url, name) == None: exit(f'Unable to start import of \"{url}\" to github repo", "= Github() github_repos = github.repositories() if github_repos == None: exit('Not able to retreive", "github_repos = github.repositories() if github_repos == None: exit('Not able to retreive github repositories')", "print(f'Repository \"{alternativeKey}\" already exsists on Github and will not be exported from gitlab')", "to set git lfs preference on: {name}') time.sleep(1) if status != 'complete': exit(f'Import", "all gitlab repositories gitlab = Gitlab() gitlab_repos = gitlab.repositories() if gitlab_repos == None:", "exit(message): print(message) sys.exit() if __name__ == \"__main__\": # Get all gitlab repositories gitlab", "== None: exit('Not able to retreive gitlab repositories') elif gitlab_repos == dict(): exit('Zero", "gitlab.repositories() if gitlab_repos == None: exit('Not able to retreive gitlab repositories') elif gitlab_repos", "exit(f'Unable to get list of git lfs files in repo: {name}') if github.lfsPreference(name)", "None ] while status not in finishedStatus: status = github.importStatus(name) if previousStatus !=", "be exported from gitlab') for name, url in gitlab_repos.items(): name = str(name).replace(' ',", "repository: {name}') continue # Start import to repository if github.importStart(url, name) == 
None:", "import sys import time from src.github import Github from src.gitlab import Gitlab def", "if gitlab_repos == None: exit('Not able to retreive gitlab repositories') elif gitlab_repos ==", "status: print(f'Status: {status}') previousStatus = status if status == 'importing': # Enable transfer", "not exist if github.repositoryCreate(name, '') == None: print(f'Unable to create repository: {name}') continue", "exit(f'Unable to set git lfs preference on: {name}') time.sleep(1) if status != 'complete':", "= gitlab.repositories() if gitlab_repos == None: exit('Not able to retreive gitlab repositories') elif", "repositories github = Github() github_repos = github.repositories() if github_repos == None: exit('Not able", "the gitlab account') print ('Gitlab repositories found: ' + str(len(gitlab_repos))) # Get all", "files if github.getLargeFiles(name) == None: exit(f'Unable to get list of git lfs files", "status = github.importStatus(name) if previousStatus != status: print(f'Status: {status}') previousStatus = status if", "+ str(len(github_repos))) # Skip repositories that already exists on github for key in", "name = str(name).replace(' ', '-') print(f'Starting import of repository: {name}') # Create repository", "== None: exit('Not able to retreive github repositories') print ('Github repositories found: '", "= '' previousStatus = '' finishedStatus = [ 'complete', 'auth_failed', 'error', 'detection_needs_auth', 'detection_found_nothing',", "if github_repos == None: exit('Not able to retreive github repositories') print ('Github repositories", "account') print ('Gitlab repositories found: ' + str(len(gitlab_repos))) # Get all github repositories", "key in github_repos.keys(): alternativeKey = str(key).replace('-', ' ') if key in gitlab_repos.keys(): gitlab_repos.pop(key)", "Gitlab() gitlab_repos = gitlab.repositories() if gitlab_repos == None: exit('Not able to retreive gitlab", "all github repositories github = Github() github_repos = github.repositories() if 
github_repos == None:", "status != 'complete': exit(f'Import of \"{name}\" to Github finished with status: {status}') print(f'Import", "github for key in github_repos.keys(): alternativeKey = str(key).replace('-', ' ') if key in", "exit('Not able to retreive gitlab repositories') elif gitlab_repos == dict(): exit('Zero repositories was", "exsists on Github and will not be exported from gitlab') for name, url", "repository if github.importStart(url, name) == None: exit(f'Unable to start import of \"{url}\" to", "'detection_found_nothing', 'detection_found_multiple', None ] while status not in finishedStatus: status = github.importStatus(name) if", "alternativeKey = str(key).replace('-', ' ') if key in gitlab_repos.keys(): gitlab_repos.pop(key) print(f'Repository \"{key}\" already", "url in gitlab_repos.items(): name = str(name).replace(' ', '-') print(f'Starting import of repository: {name}')", "github.lfsPreference(name) == None: exit(f'Unable to set git lfs preference on: {name}') time.sleep(1) if", "src.gitlab import Gitlab def exit(message): print(message) sys.exit() if __name__ == \"__main__\": # Get", "lfs files in repo: {name}') if github.lfsPreference(name) == None: exit(f'Unable to set git", "gitlab_repos.pop(alternativeKey) print(f'Repository \"{alternativeKey}\" already exsists on Github and will not be exported from", "elif gitlab_repos == dict(): exit('Zero repositories was fetched from the gitlab account') print", "to retreive github repositories') print ('Github repositories found: ' + str(len(github_repos))) # Skip", "Github and will not be exported from gitlab') for name, url in gitlab_repos.items():", "while status not in finishedStatus: status = github.importStatus(name) if previousStatus != status: print(f'Status:", "repositories') elif gitlab_repos == dict(): exit('Zero repositories was fetched from the gitlab account')", "from src.github import Github from src.gitlab import Gitlab def exit(message): print(message) sys.exit() if", "github 
repositories github = Github() github_repos = github.repositories() if github_repos == None: exit('Not", "Github finished with status: {status}') print(f'Import of \"{name}\" to Github finished with status:", "gitlab repositories gitlab = Gitlab() gitlab_repos = gitlab.repositories() if gitlab_repos == None: exit('Not", "'importing': # Enable transfer of git lfs files if github.getLargeFiles(name) == None: exit(f'Unable", "{name}') continue # Start import to repository if github.importStart(url, name) == None: exit(f'Unable", "fetched from the gitlab account') print ('Gitlab repositories found: ' + str(len(gitlab_repos))) #", "import is done status = '' previousStatus = '' finishedStatus = [ 'complete',", "'complete', 'auth_failed', 'error', 'detection_needs_auth', 'detection_found_nothing', 'detection_found_multiple', None ] while status not in finishedStatus:", "finished with status: {status}') print(f'Import of \"{name}\" to Github finished with status: {status}')", "be exported from gitlab') if alternativeKey in gitlab_repos.keys(): gitlab_repos.pop(alternativeKey) print(f'Repository \"{alternativeKey}\" already exsists", "exsists on Github and will not be exported from gitlab') if alternativeKey in", "finishedStatus: status = github.importStatus(name) if previousStatus != status: print(f'Status: {status}') previousStatus = status", "import of repository: {name}') # Create repository that does not exist if github.repositoryCreate(name,", "not in finishedStatus: status = github.importStatus(name) if previousStatus != status: print(f'Status: {status}') previousStatus", "str(len(github_repos))) # Skip repositories that already exists on github for key in github_repos.keys():", "gitlab_repos.items(): name = str(name).replace(' ', '-') print(f'Starting import of repository: {name}') # Create", "start import of \"{url}\" to github repo named \"{name}\"') # Check if import", "def exit(message): print(message) sys.exit() if __name__ == \"__main__\": # Get all gitlab 
repositories", "github repositories') print ('Github repositories found: ' + str(len(github_repos))) # Skip repositories that", "already exsists on Github and will not be exported from gitlab') if alternativeKey", "github.importStatus(name) if previousStatus != status: print(f'Status: {status}') previousStatus = status if status ==", "Start import to repository if github.importStart(url, name) == None: exit(f'Unable to start import", "finishedStatus = [ 'complete', 'auth_failed', 'error', 'detection_needs_auth', 'detection_found_nothing', 'detection_found_multiple', None ] while status", "repositories found: ' + str(len(github_repos))) # Skip repositories that already exists on github", "None: exit(f'Unable to set git lfs preference on: {name}') time.sleep(1) if status !=", "'complete': exit(f'Import of \"{name}\" to Github finished with status: {status}') print(f'Import of \"{name}\"", "on Github and will not be exported from gitlab') for name, url in", "# Check if import is done status = '' previousStatus = '' finishedStatus", "print ('Gitlab repositories found: ' + str(len(gitlab_repos))) # Get all github repositories github", "key in gitlab_repos.keys(): gitlab_repos.pop(key) print(f'Repository \"{key}\" already exsists on Github and will not", "in github_repos.keys(): alternativeKey = str(key).replace('-', ' ') if key in gitlab_repos.keys(): gitlab_repos.pop(key) print(f'Repository", "github = Github() github_repos = github.repositories() if github_repos == None: exit('Not able to", "print(f'Unable to create repository: {name}') continue # Start import to repository if github.importStart(url,", "' + str(len(gitlab_repos))) # Get all github repositories github = Github() github_repos =", "if github.importStart(url, name) == None: exit(f'Unable to start import of \"{url}\" to github", "dict(): exit('Zero repositories was fetched from the gitlab account') print ('Gitlab repositories found:", "in gitlab_repos.items(): name = str(name).replace(' ', '-') 
print(f'Starting import of repository: {name}') #", "of \"{url}\" to github repo named \"{name}\"') # Check if import is done", "str(key).replace('-', ' ') if key in gitlab_repos.keys(): gitlab_repos.pop(key) print(f'Repository \"{key}\" already exsists on", "set git lfs preference on: {name}') time.sleep(1) if status != 'complete': exit(f'Import of", "Github and will not be exported from gitlab') if alternativeKey in gitlab_repos.keys(): gitlab_repos.pop(alternativeKey)", "exists on github for key in github_repos.keys(): alternativeKey = str(key).replace('-', ' ') if", "{name}') time.sleep(1) if status != 'complete': exit(f'Import of \"{name}\" to Github finished with", "str(name).replace(' ', '-') print(f'Starting import of repository: {name}') # Create repository that does", "print ('Github repositories found: ' + str(len(github_repos))) # Skip repositories that already exists", "'' previousStatus = '' finishedStatus = [ 'complete', 'auth_failed', 'error', 'detection_needs_auth', 'detection_found_nothing', 'detection_found_multiple',", "previousStatus != status: print(f'Status: {status}') previousStatus = status if status == 'importing': #", "repositories found: ' + str(len(gitlab_repos))) # Get all github repositories github = Github()", "repo named \"{name}\"') # Check if import is done status = '' previousStatus", "previousStatus = status if status == 'importing': # Enable transfer of git lfs", "if __name__ == \"__main__\": # Get all gitlab repositories gitlab = Gitlab() gitlab_repos", "gitlab_repos == None: exit('Not able to retreive gitlab repositories') elif gitlab_repos == dict():", "repository that does not exist if github.repositoryCreate(name, '') == None: print(f'Unable to create", "('Gitlab repositories found: ' + str(len(gitlab_repos))) # Get all github repositories github =", "for key in github_repos.keys(): alternativeKey = str(key).replace('-', ' ') if key in gitlab_repos.keys():", "# Enable transfer of git lfs files if github.getLargeFiles(name) 
== None: exit(f'Unable to", "of git lfs files in repo: {name}') if github.lfsPreference(name) == None: exit(f'Unable to", "Get all gitlab repositories gitlab = Gitlab() gitlab_repos = gitlab.repositories() if gitlab_repos ==", "'-') print(f'Starting import of repository: {name}') # Create repository that does not exist", "if github.lfsPreference(name) == None: exit(f'Unable to set git lfs preference on: {name}') time.sleep(1)", "exit('Zero repositories was fetched from the gitlab account') print ('Gitlab repositories found: '", "== \"__main__\": # Get all gitlab repositories gitlab = Gitlab() gitlab_repos = gitlab.repositories()", "') if key in gitlab_repos.keys(): gitlab_repos.pop(key) print(f'Repository \"{key}\" already exsists on Github and", "= [ 'complete', 'auth_failed', 'error', 'detection_needs_auth', 'detection_found_nothing', 'detection_found_multiple', None ] while status not", "sys import time from src.github import Github from src.gitlab import Gitlab def exit(message):", "time.sleep(1) if status != 'complete': exit(f'Import of \"{name}\" to Github finished with status:", "None: exit(f'Unable to start import of \"{url}\" to github repo named \"{name}\"') #", "of \"{name}\" to Github finished with status: {status}') print(f'Import of \"{name}\" to Github", "gitlab_repos == dict(): exit('Zero repositories was fetched from the gitlab account') print ('Gitlab", "gitlab_repos.keys(): gitlab_repos.pop(alternativeKey) print(f'Repository \"{alternativeKey}\" already exsists on Github and will not be exported", "found: ' + str(len(github_repos))) # Skip repositories that already exists on github for", "if status != 'complete': exit(f'Import of \"{name}\" to Github finished with status: {status}')", "str(len(gitlab_repos))) # Get all github repositories github = Github() github_repos = github.repositories() if", "Get all github repositories github = Github() github_repos = github.repositories() if github_repos ==", "= github.importStatus(name) if previousStatus 
!= status: print(f'Status: {status}') previousStatus = status if status", "github.getLargeFiles(name) == None: exit(f'Unable to get list of git lfs files in repo:", "exported from gitlab') for name, url in gitlab_repos.items(): name = str(name).replace(' ', '-')", "gitlab_repos = gitlab.repositories() if gitlab_repos == None: exit('Not able to retreive gitlab repositories')", "= github.repositories() if github_repos == None: exit('Not able to retreive github repositories') print", "gitlab account') print ('Gitlab repositories found: ' + str(len(gitlab_repos))) # Get all github", "alternativeKey in gitlab_repos.keys(): gitlab_repos.pop(alternativeKey) print(f'Repository \"{alternativeKey}\" already exsists on Github and will not", "to get list of git lfs files in repo: {name}') if github.lfsPreference(name) ==", "exit(f'Import of \"{name}\" to Github finished with status: {status}') print(f'Import of \"{name}\" to", "gitlab_repos.keys(): gitlab_repos.pop(key) print(f'Repository \"{key}\" already exsists on Github and will not be exported", "in gitlab_repos.keys(): gitlab_repos.pop(alternativeKey) print(f'Repository \"{alternativeKey}\" already exsists on Github and will not be", "already exsists on Github and will not be exported from gitlab') for name,", "named \"{name}\"') # Check if import is done status = '' previousStatus =", "from gitlab') for name, url in gitlab_repos.items(): name = str(name).replace(' ', '-') print(f'Starting", "exported from gitlab') if alternativeKey in gitlab_repos.keys(): gitlab_repos.pop(alternativeKey) print(f'Repository \"{alternativeKey}\" already exsists on", "Check if import is done status = '' previousStatus = '' finishedStatus =", "' + str(len(github_repos))) # Skip repositories that already exists on github for key", "status == 'importing': # Enable transfer of git lfs files if github.getLargeFiles(name) ==", "status not in finishedStatus: status = github.importStatus(name) if previousStatus != status: print(f'Status: 
{status}')", "repositories that already exists on github for key in github_repos.keys(): alternativeKey = str(key).replace('-',", "Enable transfer of git lfs files if github.getLargeFiles(name) == None: exit(f'Unable to get" ]
[ "This converts gradients of the embedding variable to tensors which allows to use", "convert_gradient_to_tensor(x): \"\"\"Wraps :obj:`x` to convert its gradient to a tensor.\"\"\" return x def", "The embedding tensor. ids: The ids to lookup in :obj:`params`. Returns: A ``tf.Tensor``,", "which allows to use of optimizers that don't support sparse gradients (e.g. Adafactor).", "tf.convert_to_tensor(dy), shape_func=lambda op: [op.inputs[0].get_shape()]) def convert_gradient_to_tensor(x): \"\"\"Wraps :obj:`x` to convert its gradient to", "from tensorflow.python.framework import function @function.Defun( python_grad_func=lambda x, dy: tf.convert_to_tensor(dy), shape_func=lambda op: [op.inputs[0].get_shape()]) def", "that don't support sparse gradients (e.g. Adafactor). Args: params: The embedding tensor. ids:", "convert its gradient to a tensor.\"\"\" return x def embedding_lookup(params, ids): \"\"\"Wrapper around", "common layers.\"\"\" import tensorflow as tf from tensorflow.python.framework import function @function.Defun( python_grad_func=lambda x,", "dy: tf.convert_to_tensor(dy), shape_func=lambda op: [op.inputs[0].get_shape()]) def convert_gradient_to_tensor(x): \"\"\"Wraps :obj:`x` to convert its gradient", "op: [op.inputs[0].get_shape()]) def convert_gradient_to_tensor(x): \"\"\"Wraps :obj:`x` to convert its gradient to a tensor.\"\"\"", "ids): \"\"\"Wrapper around ``tf.nn.embedding_lookup``. This converts gradients of the embedding variable to tensors", "to convert its gradient to a tensor.\"\"\" return x def embedding_lookup(params, ids): \"\"\"Wrapper", "a tensor.\"\"\" return x def embedding_lookup(params, ids): \"\"\"Wrapper around ``tf.nn.embedding_lookup``. This converts gradients", "embedding_lookup(params, ids): \"\"\"Wrapper around ``tf.nn.embedding_lookup``. 
This converts gradients of the embedding variable to", "import function @function.Defun( python_grad_func=lambda x, dy: tf.convert_to_tensor(dy), shape_func=lambda op: [op.inputs[0].get_shape()]) def convert_gradient_to_tensor(x): \"\"\"Wraps", "\"\"\"Defines common layers.\"\"\" import tensorflow as tf from tensorflow.python.framework import function @function.Defun( python_grad_func=lambda", "@function.Defun( python_grad_func=lambda x, dy: tf.convert_to_tensor(dy), shape_func=lambda op: [op.inputs[0].get_shape()]) def convert_gradient_to_tensor(x): \"\"\"Wraps :obj:`x` to", "embedding variable to tensors which allows to use of optimizers that don't support", "to lookup in :obj:`params`. Returns: A ``tf.Tensor``, the embeddings that correspond to :obj:`ids`.", "The ids to lookup in :obj:`params`. Returns: A ``tf.Tensor``, the embeddings that correspond", "Adafactor). Args: params: The embedding tensor. ids: The ids to lookup in :obj:`params`.", "shape_func=lambda op: [op.inputs[0].get_shape()]) def convert_gradient_to_tensor(x): \"\"\"Wraps :obj:`x` to convert its gradient to a", "tensor.\"\"\" return x def embedding_lookup(params, ids): \"\"\"Wrapper around ``tf.nn.embedding_lookup``. This converts gradients of", "of optimizers that don't support sparse gradients (e.g. Adafactor). Args: params: The embedding", "tensor. ids: The ids to lookup in :obj:`params`. Returns: A ``tf.Tensor``, the embeddings", "\"\"\"Wrapper around ``tf.nn.embedding_lookup``. This converts gradients of the embedding variable to tensors which", "layers.\"\"\" import tensorflow as tf from tensorflow.python.framework import function @function.Defun( python_grad_func=lambda x, dy:", "gradient to a tensor.\"\"\" return x def embedding_lookup(params, ids): \"\"\"Wrapper around ``tf.nn.embedding_lookup``. This", "sparse gradients (e.g. Adafactor). Args: params: The embedding tensor. ids: The ids to", "``tf.nn.embedding_lookup``. 
This converts gradients of the embedding variable to tensors which allows to", "variable to tensors which allows to use of optimizers that don't support sparse", "in :obj:`params`. Returns: A ``tf.Tensor``, the embeddings that correspond to :obj:`ids`. \"\"\" params", "don't support sparse gradients (e.g. Adafactor). Args: params: The embedding tensor. ids: The", "tf from tensorflow.python.framework import function @function.Defun( python_grad_func=lambda x, dy: tf.convert_to_tensor(dy), shape_func=lambda op: [op.inputs[0].get_shape()])", "the embedding variable to tensors which allows to use of optimizers that don't", "as tf from tensorflow.python.framework import function @function.Defun( python_grad_func=lambda x, dy: tf.convert_to_tensor(dy), shape_func=lambda op:", "<reponame>dblandan/OpenNMT-tf \"\"\"Defines common layers.\"\"\" import tensorflow as tf from tensorflow.python.framework import function @function.Defun(", "params: The embedding tensor. ids: The ids to lookup in :obj:`params`. Returns: A", "to use of optimizers that don't support sparse gradients (e.g. Adafactor). Args: params:", "converts gradients of the embedding variable to tensors which allows to use of", "allows to use of optimizers that don't support sparse gradients (e.g. Adafactor). Args:", "``tf.Tensor``, the embeddings that correspond to :obj:`ids`. \"\"\" params = convert_gradient_to_tensor(params) return tf.nn.embedding_lookup(params,", "function @function.Defun( python_grad_func=lambda x, dy: tf.convert_to_tensor(dy), shape_func=lambda op: [op.inputs[0].get_shape()]) def convert_gradient_to_tensor(x): \"\"\"Wraps :obj:`x`", "python_grad_func=lambda x, dy: tf.convert_to_tensor(dy), shape_func=lambda op: [op.inputs[0].get_shape()]) def convert_gradient_to_tensor(x): \"\"\"Wraps :obj:`x` to convert", "lookup in :obj:`params`. Returns: A ``tf.Tensor``, the embeddings that correspond to :obj:`ids`. 
\"\"\"", "import tensorflow as tf from tensorflow.python.framework import function @function.Defun( python_grad_func=lambda x, dy: tf.convert_to_tensor(dy),", "\"\"\"Wraps :obj:`x` to convert its gradient to a tensor.\"\"\" return x def embedding_lookup(params,", "def embedding_lookup(params, ids): \"\"\"Wrapper around ``tf.nn.embedding_lookup``. This converts gradients of the embedding variable", "ids to lookup in :obj:`params`. Returns: A ``tf.Tensor``, the embeddings that correspond to", "x def embedding_lookup(params, ids): \"\"\"Wrapper around ``tf.nn.embedding_lookup``. This converts gradients of the embedding", "return x def embedding_lookup(params, ids): \"\"\"Wrapper around ``tf.nn.embedding_lookup``. This converts gradients of the", "to tensors which allows to use of optimizers that don't support sparse gradients", "x, dy: tf.convert_to_tensor(dy), shape_func=lambda op: [op.inputs[0].get_shape()]) def convert_gradient_to_tensor(x): \"\"\"Wraps :obj:`x` to convert its", "optimizers that don't support sparse gradients (e.g. Adafactor). Args: params: The embedding tensor.", "its gradient to a tensor.\"\"\" return x def embedding_lookup(params, ids): \"\"\"Wrapper around ``tf.nn.embedding_lookup``.", "tensorflow.python.framework import function @function.Defun( python_grad_func=lambda x, dy: tf.convert_to_tensor(dy), shape_func=lambda op: [op.inputs[0].get_shape()]) def convert_gradient_to_tensor(x):", "to a tensor.\"\"\" return x def embedding_lookup(params, ids): \"\"\"Wrapper around ``tf.nn.embedding_lookup``. This converts", "def convert_gradient_to_tensor(x): \"\"\"Wraps :obj:`x` to convert its gradient to a tensor.\"\"\" return x", "of the embedding variable to tensors which allows to use of optimizers that", "Args: params: The embedding tensor. ids: The ids to lookup in :obj:`params`. Returns:", "Returns: A ``tf.Tensor``, the embeddings that correspond to :obj:`ids`. 
\"\"\" params = convert_gradient_to_tensor(params)", "A ``tf.Tensor``, the embeddings that correspond to :obj:`ids`. \"\"\" params = convert_gradient_to_tensor(params) return", "(e.g. Adafactor). Args: params: The embedding tensor. ids: The ids to lookup in", "embedding tensor. ids: The ids to lookup in :obj:`params`. Returns: A ``tf.Tensor``, the", "use of optimizers that don't support sparse gradients (e.g. Adafactor). Args: params: The", "gradients (e.g. Adafactor). Args: params: The embedding tensor. ids: The ids to lookup", "tensors which allows to use of optimizers that don't support sparse gradients (e.g.", ":obj:`params`. Returns: A ``tf.Tensor``, the embeddings that correspond to :obj:`ids`. \"\"\" params =", ":obj:`x` to convert its gradient to a tensor.\"\"\" return x def embedding_lookup(params, ids):", "tensorflow as tf from tensorflow.python.framework import function @function.Defun( python_grad_func=lambda x, dy: tf.convert_to_tensor(dy), shape_func=lambda", "gradients of the embedding variable to tensors which allows to use of optimizers", "[op.inputs[0].get_shape()]) def convert_gradient_to_tensor(x): \"\"\"Wraps :obj:`x` to convert its gradient to a tensor.\"\"\" return", "around ``tf.nn.embedding_lookup``. This converts gradients of the embedding variable to tensors which allows", "support sparse gradients (e.g. Adafactor). Args: params: The embedding tensor. ids: The ids", "ids: The ids to lookup in :obj:`params`. Returns: A ``tf.Tensor``, the embeddings that", "the embeddings that correspond to :obj:`ids`. \"\"\" params = convert_gradient_to_tensor(params) return tf.nn.embedding_lookup(params, ids)" ]
[ "base_dir UPLOAD_FOLDER = 'static/resource/uploads/image/' ALLOWED_EXTENSIONS = set(['bmp', 'webp', 'png', 'jpg', 'jpeg', 'gif']) @api.route(\"/add_tag/\",", "import api from app.whoosh import search_helper from app import base_dir UPLOAD_FOLDER = 'static/resource/uploads/image/'", "import search_helper from app import base_dir UPLOAD_FOLDER = 'static/resource/uploads/image/' ALLOWED_EXTENSIONS = set(['bmp', 'webp',", "Null!\"} name = request.form['name'] if name != \"\": tag = Tag.add(name) if tag:", "file\" return jsonify(result) if file and allowed_file(file.filename): filename = secure_filename(file.filename) save_path = os.path.join(base_dir,UPLOAD_FOLDER,filename)", "= res response[\"status\"] = 200 else: response[\"msg\"] = \"tag has already exists!\" return", "def add_branch(): response = {\"status\": 500, \"msg\": \"name is Null!\"} name = request.form['name']", "from app.models.tag import Tag from app.api import api from app.whoosh import search_helper from", "file = request.files['editormd-image-file'] if file.filename == '': result[\"message\"] = \"No selected file\" return", "= request.files['editormd-image-file'] if file.filename == '': result[\"message\"] = \"No selected file\" return jsonify(result)", "\"\": tag = Tag.add(name) if tag: res = {\"id\": tag.id, \"name\": tag.name} response['tag']", "request.method == \"POST\": print(request.files) if 'editormd-image-file' not in request.files: result[\"message\"] = \"No file", "parent_id=parent_id) if branch: res = {\"id\": branch.id, \"name\": branch.name} response['branch'] = res response[\"status\"]", "api from app.whoosh import search_helper from app import base_dir UPLOAD_FOLDER = 'static/resource/uploads/image/' ALLOWED_EXTENSIONS", "already exists!\" return make_response(json.dumps(response)) @api.route(\"/add_branch/\", methods=['POST']) @login_required def add_branch(): response = {\"status\": 500,", "!= \"\": branch = Branch.add(name, parent_id=parent_id) if branch: res = {\"id\": 
branch.id, \"name\":", "jsonify(result) file = request.files['editormd-image-file'] if file.filename == '': result[\"message\"] = \"No selected file\"", "= 200 else: response[\"msg\"] = \"tag has already exists!\" return make_response(json.dumps(response)) @api.route(\"/add_branch/\", methods=['POST'])", "set(['bmp', 'webp', 'png', 'jpg', 'jpeg', 'gif']) @api.route(\"/add_tag/\", methods=['POST']) @login_required def add_tag(): # name", "\"\" } if request.method == \"POST\": print(request.files) if 'editormd-image-file' not in request.files: result[\"message\"]", "request.args.get('name', 0, type=int) response = {\"status\": 500, \"msg\": \"name is Null!\"} name =", "'jpeg', 'gif']) @api.route(\"/add_tag/\", methods=['POST']) @login_required def add_tag(): # name = request.args.get('name', 0, type=int)", "= request.form['parent'] if name != \"\": branch = Branch.add(name, parent_id=parent_id) if branch: res", "\"branch has already exists!\" return make_response(json.dumps(response)) @api.route(\"/search/<string:keyword>\") def search(keyword): res = search_helper.search(keyword) data", "search_helper.search(keyword) data = {} data[\"result\"] = res return jsonify(data) def allowed_file(filename): return '.'", "= {\"status\": 500, \"msg\": \"name is Null!\"} name = request.form['name'] if name !=", "res = {\"id\": branch.id, \"name\": branch.name} response['branch'] = res response[\"status\"] = 200 else:", "import json from werkzeug.utils import secure_filename import os from app.models.fragment import Fragment from", "@api.route(\"/add_tag/\", methods=['POST']) @login_required def add_tag(): # name = request.args.get('name', 0, type=int) response =", "branch: res = {\"id\": branch.id, \"name\": branch.name} response['branch'] = res response[\"status\"] = 200", "{} data[\"result\"] = res return jsonify(data) def allowed_file(filename): return '.' 
in filename and", "request.files['editormd-image-file'] if file.filename == '': result[\"message\"] = \"No selected file\" return jsonify(result) if", "from app.whoosh import search_helper from app import base_dir UPLOAD_FOLDER = 'static/resource/uploads/image/' ALLOWED_EXTENSIONS =", "already exists!\" return make_response(json.dumps(response)) @api.route(\"/search/<string:keyword>\") def search(keyword): res = search_helper.search(keyword) data = {}", "@api.route(\"/search/<string:keyword>\") def search(keyword): res = search_helper.search(keyword) data = {} data[\"result\"] = res return", "Tag.add(name) if tag: res = {\"id\": tag.id, \"name\": tag.name} response['tag'] = res response[\"status\"]", "from flask import make_response, request, jsonify from flask_login import login_required import json from", "def search(keyword): res = search_helper.search(keyword) data = {} data[\"result\"] = res return jsonify(data)", "= {} data[\"result\"] = res return jsonify(data) def allowed_file(filename): return '.' in filename", "file part\" return jsonify(result) file = request.files['editormd-image-file'] if file.filename == '': result[\"message\"] =", "if file.filename == '': result[\"message\"] = \"No selected file\" return jsonify(result) if file", "ALLOWED_EXTENSIONS = set(['bmp', 'webp', 'png', 'jpg', 'jpeg', 'gif']) @api.route(\"/add_tag/\", methods=['POST']) @login_required def add_tag():", "'.' 
in filename and \\ filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS @api.route(\"/upload_image/\",methods=['POST']) def upload_image(): result", "= secure_filename(file.filename) save_path = os.path.join(base_dir,UPLOAD_FOLDER,filename) file.save(save_path) result[\"success\"] = 1 result[\"message\"] = \"Success\" result[\"url\"]", "in ALLOWED_EXTENSIONS @api.route(\"/upload_image/\",methods=['POST']) def upload_image(): result = { \"success\" : 0, \"message\" :", "== \"POST\": print(request.files) if 'editormd-image-file' not in request.files: result[\"message\"] = \"No file part\"", "# -*- coding: utf-8 -*- from flask import make_response, request, jsonify from flask_login", "methods=['POST']) @login_required def add_branch(): response = {\"status\": 500, \"msg\": \"name is Null!\"} name", "res response[\"status\"] = 200 else: response[\"msg\"] = \"tag has already exists!\" return make_response(json.dumps(response))", "name != \"\": branch = Branch.add(name, parent_id=parent_id) if branch: res = {\"id\": branch.id,", "parent_id = request.form['parent'] if name != \"\": branch = Branch.add(name, parent_id=parent_id) if branch:", "= request.args.get('name', 0, type=int) response = {\"status\": 500, \"msg\": \"name is Null!\"} name", "name = request.form['name'] parent_id = request.form['parent'] if name != \"\": branch = Branch.add(name,", "= request.form['name'] parent_id = request.form['parent'] if name != \"\": branch = Branch.add(name, parent_id=parent_id)", "= res response[\"status\"] = 200 else: response[\"msg\"] = \"branch has already exists!\" return", "ALLOWED_EXTENSIONS @api.route(\"/upload_image/\",methods=['POST']) def upload_image(): result = { \"success\" : 0, \"message\" : \"\",", "result[\"message\"] = \"No selected file\" return jsonify(result) if file and allowed_file(file.filename): filename =", "'': result[\"message\"] = \"No selected file\" return jsonify(result) if file and allowed_file(file.filename): filename", "= 
os.path.join(base_dir,UPLOAD_FOLDER,filename) file.save(save_path) result[\"success\"] = 1 result[\"message\"] = \"Success\" result[\"url\"] = \"/\"+ UPLOAD_FOLDER", "from flask_login import login_required import json from werkzeug.utils import secure_filename import os from", "\"success\" : 0, \"message\" : \"\", \"url\" : \"\" } if request.method ==", "res = search_helper.search(keyword) data = {} data[\"result\"] = res return jsonify(data) def allowed_file(filename):", "allowed_file(filename): return '.' in filename and \\ filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS @api.route(\"/upload_image/\",methods=['POST']) def", "filename = secure_filename(file.filename) save_path = os.path.join(base_dir,UPLOAD_FOLDER,filename) file.save(save_path) result[\"success\"] = 1 result[\"message\"] = \"Success\"", "secure_filename(file.filename) save_path = os.path.join(base_dir,UPLOAD_FOLDER,filename) file.save(save_path) result[\"success\"] = 1 result[\"message\"] = \"Success\" result[\"url\"] =", "app.api import api from app.whoosh import search_helper from app import base_dir UPLOAD_FOLDER =", "request.form['name'] if name != \"\": tag = Tag.add(name) if tag: res = {\"id\":", "'static/resource/uploads/image/' ALLOWED_EXTENSIONS = set(['bmp', 'webp', 'png', 'jpg', 'jpeg', 'gif']) @api.route(\"/add_tag/\", methods=['POST']) @login_required def", ": 0, \"message\" : \"\", \"url\" : \"\" } if request.method == \"POST\":", "search_helper from app import base_dir UPLOAD_FOLDER = 'static/resource/uploads/image/' ALLOWED_EXTENSIONS = set(['bmp', 'webp', 'png',", "-*- coding: utf-8 -*- from flask import make_response, request, jsonify from flask_login import", "0, \"message\" : \"\", \"url\" : \"\" } if request.method == \"POST\": print(request.files)", "500, \"msg\": \"name is Null!\"} name = request.form['name'] if name != \"\": tag", "def upload_image(): result = { \"success\" : 0, \"message\" : \"\", \"url\" :", "import Fragment from app.models.branch import Branch 
from app.models.tag import Tag from app.api import", "result = { \"success\" : 0, \"message\" : \"\", \"url\" : \"\" }", "add_branch(): response = {\"status\": 500, \"msg\": \"name is Null!\"} name = request.form['name'] parent_id", "app import base_dir UPLOAD_FOLDER = 'static/resource/uploads/image/' ALLOWED_EXTENSIONS = set(['bmp', 'webp', 'png', 'jpg', 'jpeg',", "secure_filename import os from app.models.fragment import Fragment from app.models.branch import Branch from app.models.tag", "return '.' in filename and \\ filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS @api.route(\"/upload_image/\",methods=['POST']) def upload_image():", "app.models.branch import Branch from app.models.tag import Tag from app.api import api from app.whoosh", "app.whoosh import search_helper from app import base_dir UPLOAD_FOLDER = 'static/resource/uploads/image/' ALLOWED_EXTENSIONS = set(['bmp',", "jsonify from flask_login import login_required import json from werkzeug.utils import secure_filename import os", "'jpg', 'jpeg', 'gif']) @api.route(\"/add_tag/\", methods=['POST']) @login_required def add_tag(): # name = request.args.get('name', 0,", "tag = Tag.add(name) if tag: res = {\"id\": tag.id, \"name\": tag.name} response['tag'] =", "def allowed_file(filename): return '.' 
in filename and \\ filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS @api.route(\"/upload_image/\",methods=['POST'])", "coding: utf-8 -*- from flask import make_response, request, jsonify from flask_login import login_required", "= 'static/resource/uploads/image/' ALLOWED_EXTENSIONS = set(['bmp', 'webp', 'png', 'jpg', 'jpeg', 'gif']) @api.route(\"/add_tag/\", methods=['POST']) @login_required", "flask_login import login_required import json from werkzeug.utils import secure_filename import os from app.models.fragment", "is Null!\"} name = request.form['name'] parent_id = request.form['parent'] if name != \"\": branch", "response = {\"status\": 500, \"msg\": \"name is Null!\"} name = request.form['name'] parent_id =", "= search_helper.search(keyword) data = {} data[\"result\"] = res return jsonify(data) def allowed_file(filename): return", "result[\"message\"] = \"No file part\" return jsonify(result) file = request.files['editormd-image-file'] if file.filename ==", "\"name is Null!\"} name = request.form['name'] parent_id = request.form['parent'] if name != \"\":", "import login_required import json from werkzeug.utils import secure_filename import os from app.models.fragment import", "500, \"msg\": \"name is Null!\"} name = request.form['name'] parent_id = request.form['parent'] if name", "and \\ filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS @api.route(\"/upload_image/\",methods=['POST']) def upload_image(): result = { \"success\"", "= Branch.add(name, parent_id=parent_id) if branch: res = {\"id\": branch.id, \"name\": branch.name} response['branch'] =", "tag: res = {\"id\": tag.id, \"name\": tag.name} response['tag'] = res response[\"status\"] = 200", "response[\"status\"] = 200 else: response[\"msg\"] = \"branch has already exists!\" return make_response(json.dumps(response)) @api.route(\"/search/<string:keyword>\")", "= 1 result[\"message\"] = \"Success\" result[\"url\"] = \"/\"+ UPLOAD_FOLDER + filename return jsonify(result)", "\"msg\": 
\"name is Null!\"} name = request.form['name'] parent_id = request.form['parent'] if name !=", "Branch.add(name, parent_id=parent_id) if branch: res = {\"id\": branch.id, \"name\": branch.name} response['branch'] = res", "response['branch'] = res response[\"status\"] = 200 else: response[\"msg\"] = \"branch has already exists!\"", "= set(['bmp', 'webp', 'png', 'jpg', 'jpeg', 'gif']) @api.route(\"/add_tag/\", methods=['POST']) @login_required def add_tag(): #", "tag.id, \"name\": tag.name} response['tag'] = res response[\"status\"] = 200 else: response[\"msg\"] = \"tag", "app.models.fragment import Fragment from app.models.branch import Branch from app.models.tag import Tag from app.api", "@login_required def add_tag(): # name = request.args.get('name', 0, type=int) response = {\"status\": 500,", "= request.form['name'] if name != \"\": tag = Tag.add(name) if tag: res =", "import Tag from app.api import api from app.whoosh import search_helper from app import", ": \"\" } if request.method == \"POST\": print(request.files) if 'editormd-image-file' not in request.files:", "Branch from app.models.tag import Tag from app.api import api from app.whoosh import search_helper", "from werkzeug.utils import secure_filename import os from app.models.fragment import Fragment from app.models.branch import", "if tag: res = {\"id\": tag.id, \"name\": tag.name} response['tag'] = res response[\"status\"] =", "has already exists!\" return make_response(json.dumps(response)) @api.route(\"/add_branch/\", methods=['POST']) @login_required def add_branch(): response = {\"status\":", "\"No selected file\" return jsonify(result) if file and allowed_file(file.filename): filename = secure_filename(file.filename) save_path", "add_tag(): # name = request.args.get('name', 0, type=int) response = {\"status\": 500, \"msg\": \"name", "res return jsonify(data) def allowed_file(filename): return '.' 
in filename and \\ filename.rsplit('.', 1)[1].lower()", ": \"\", \"url\" : \"\" } if request.method == \"POST\": print(request.files) if 'editormd-image-file'", "!= \"\": tag = Tag.add(name) if tag: res = {\"id\": tag.id, \"name\": tag.name}", "= Tag.add(name) if tag: res = {\"id\": tag.id, \"name\": tag.name} response['tag'] = res", "0, type=int) response = {\"status\": 500, \"msg\": \"name is Null!\"} name = request.form['name']", "if name != \"\": branch = Branch.add(name, parent_id=parent_id) if branch: res = {\"id\":", "\"tag has already exists!\" return make_response(json.dumps(response)) @api.route(\"/add_branch/\", methods=['POST']) @login_required def add_branch(): response =", "\"No file part\" return jsonify(result) file = request.files['editormd-image-file'] if file.filename == '': result[\"message\"]", "else: response[\"msg\"] = \"tag has already exists!\" return make_response(json.dumps(response)) @api.route(\"/add_branch/\", methods=['POST']) @login_required def", "make_response(json.dumps(response)) @api.route(\"/search/<string:keyword>\") def search(keyword): res = search_helper.search(keyword) data = {} data[\"result\"] = res", "not in request.files: result[\"message\"] = \"No file part\" return jsonify(result) file = request.files['editormd-image-file']", "{\"id\": branch.id, \"name\": branch.name} response['branch'] = res response[\"status\"] = 200 else: response[\"msg\"] =", "\"name\": tag.name} response['tag'] = res response[\"status\"] = 200 else: response[\"msg\"] = \"tag has", "request.form['name'] parent_id = request.form['parent'] if name != \"\": branch = Branch.add(name, parent_id=parent_id) if", "if 'editormd-image-file' not in request.files: result[\"message\"] = \"No file part\" return jsonify(result) file", "= \"tag has already exists!\" return make_response(json.dumps(response)) @api.route(\"/add_branch/\", methods=['POST']) @login_required def add_branch(): response", "= {\"status\": 500, \"msg\": \"name is Null!\"} name = 
request.form['name'] parent_id = request.form['parent']", "in filename and \\ filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS @api.route(\"/upload_image/\",methods=['POST']) def upload_image(): result =", "\"url\" : \"\" } if request.method == \"POST\": print(request.files) if 'editormd-image-file' not in", "exists!\" return make_response(json.dumps(response)) @api.route(\"/search/<string:keyword>\") def search(keyword): res = search_helper.search(keyword) data = {} data[\"result\"]", "from app.models.branch import Branch from app.models.tag import Tag from app.api import api from", "Tag from app.api import api from app.whoosh import search_helper from app import base_dir", "branch.name} response['branch'] = res response[\"status\"] = 200 else: response[\"msg\"] = \"branch has already", "\"\", \"url\" : \"\" } if request.method == \"POST\": print(request.files) if 'editormd-image-file' not", "return jsonify(result) if file and allowed_file(file.filename): filename = secure_filename(file.filename) save_path = os.path.join(base_dir,UPLOAD_FOLDER,filename) file.save(save_path)", "return make_response(json.dumps(response)) @api.route(\"/add_branch/\", methods=['POST']) @login_required def add_branch(): response = {\"status\": 500, \"msg\": \"name", "make_response(json.dumps(response)) @api.route(\"/add_branch/\", methods=['POST']) @login_required def add_branch(): response = {\"status\": 500, \"msg\": \"name is", "@login_required def add_branch(): response = {\"status\": 500, \"msg\": \"name is Null!\"} name =", "'webp', 'png', 'jpg', 'jpeg', 'gif']) @api.route(\"/add_tag/\", methods=['POST']) @login_required def add_tag(): # name =", "= \"No file part\" return jsonify(result) file = request.files['editormd-image-file'] if file.filename == '':", "from app.models.fragment import Fragment from app.models.branch import Branch from app.models.tag import Tag from", "print(request.files) if 'editormd-image-file' not in request.files: result[\"message\"] = \"No file 
part\" return jsonify(result)", "1)[1].lower() in ALLOWED_EXTENSIONS @api.route(\"/upload_image/\",methods=['POST']) def upload_image(): result = { \"success\" : 0, \"message\"", "branch = Branch.add(name, parent_id=parent_id) if branch: res = {\"id\": branch.id, \"name\": branch.name} response['branch']", "app.models.tag import Tag from app.api import api from app.whoosh import search_helper from app", "res response[\"status\"] = 200 else: response[\"msg\"] = \"branch has already exists!\" return make_response(json.dumps(response))", "= res return jsonify(data) def allowed_file(filename): return '.' in filename and \\ filename.rsplit('.',", "@api.route(\"/add_branch/\", methods=['POST']) @login_required def add_branch(): response = {\"status\": 500, \"msg\": \"name is Null!\"}", "response[\"status\"] = 200 else: response[\"msg\"] = \"tag has already exists!\" return make_response(json.dumps(response)) @api.route(\"/add_branch/\",", "import make_response, request, jsonify from flask_login import login_required import json from werkzeug.utils import", "branch.id, \"name\": branch.name} response['branch'] = res response[\"status\"] = 200 else: response[\"msg\"] = \"branch", "if branch: res = {\"id\": branch.id, \"name\": branch.name} response['branch'] = res response[\"status\"] =", "\"name\": branch.name} response['branch'] = res response[\"status\"] = 200 else: response[\"msg\"] = \"branch has", "} if request.method == \"POST\": print(request.files) if 'editormd-image-file' not in request.files: result[\"message\"] =", "utf-8 -*- from flask import make_response, request, jsonify from flask_login import login_required import", "name != \"\": tag = Tag.add(name) if tag: res = {\"id\": tag.id, \"name\":", "response = {\"status\": 500, \"msg\": \"name is Null!\"} name = request.form['name'] if name", "= \"No selected file\" return jsonify(result) if file and allowed_file(file.filename): filename = secure_filename(file.filename)", "return 
make_response(json.dumps(response)) @api.route(\"/search/<string:keyword>\") def search(keyword): res = search_helper.search(keyword) data = {} data[\"result\"] =", "is Null!\"} name = request.form['name'] if name != \"\": tag = Tag.add(name) if", "upload_image(): result = { \"success\" : 0, \"message\" : \"\", \"url\" : \"\"", "if request.method == \"POST\": print(request.files) if 'editormd-image-file' not in request.files: result[\"message\"] = \"No", "= {\"id\": branch.id, \"name\": branch.name} response['branch'] = res response[\"status\"] = 200 else: response[\"msg\"]", "request.form['parent'] if name != \"\": branch = Branch.add(name, parent_id=parent_id) if branch: res =", "{\"id\": tag.id, \"name\": tag.name} response['tag'] = res response[\"status\"] = 200 else: response[\"msg\"] =", "@api.route(\"/upload_image/\",methods=['POST']) def upload_image(): result = { \"success\" : 0, \"message\" : \"\", \"url\"", "file and allowed_file(file.filename): filename = secure_filename(file.filename) save_path = os.path.join(base_dir,UPLOAD_FOLDER,filename) file.save(save_path) result[\"success\"] = 1", "= {\"id\": tag.id, \"name\": tag.name} response['tag'] = res response[\"status\"] = 200 else: response[\"msg\"]", "== '': result[\"message\"] = \"No selected file\" return jsonify(result) if file and allowed_file(file.filename):", "jsonify(data) def allowed_file(filename): return '.' in filename and \\ filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS", "return jsonify(data) def allowed_file(filename): return '.' 
in filename and \\ filename.rsplit('.', 1)[1].lower() in", "part\" return jsonify(result) file = request.files['editormd-image-file'] if file.filename == '': result[\"message\"] = \"No", "import secure_filename import os from app.models.fragment import Fragment from app.models.branch import Branch from", "json from werkzeug.utils import secure_filename import os from app.models.fragment import Fragment from app.models.branch", "flask import make_response, request, jsonify from flask_login import login_required import json from werkzeug.utils", "name = request.args.get('name', 0, type=int) response = {\"status\": 500, \"msg\": \"name is Null!\"}", "filename and \\ filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS @api.route(\"/upload_image/\",methods=['POST']) def upload_image(): result = {", "{ \"success\" : 0, \"message\" : \"\", \"url\" : \"\" } if request.method", "data = {} data[\"result\"] = res return jsonify(data) def allowed_file(filename): return '.' in", "-*- from flask import make_response, request, jsonify from flask_login import login_required import json", "= \"branch has already exists!\" return make_response(json.dumps(response)) @api.route(\"/search/<string:keyword>\") def search(keyword): res = search_helper.search(keyword)", "import base_dir UPLOAD_FOLDER = 'static/resource/uploads/image/' ALLOWED_EXTENSIONS = set(['bmp', 'webp', 'png', 'jpg', 'jpeg', 'gif'])", "def add_tag(): # name = request.args.get('name', 0, type=int) response = {\"status\": 500, \"msg\":", "'editormd-image-file' not in request.files: result[\"message\"] = \"No file part\" return jsonify(result) file =", "200 else: response[\"msg\"] = \"branch has already exists!\" return make_response(json.dumps(response)) @api.route(\"/search/<string:keyword>\") def search(keyword):", "# name = request.args.get('name', 0, type=int) response = {\"status\": 500, \"msg\": \"name is", "{\"status\": 500, \"msg\": \"name is Null!\"} name = request.form['name'] if name != \"\":", 
"UPLOAD_FOLDER = 'static/resource/uploads/image/' ALLOWED_EXTENSIONS = set(['bmp', 'webp', 'png', 'jpg', 'jpeg', 'gif']) @api.route(\"/add_tag/\", methods=['POST'])", "request, jsonify from flask_login import login_required import json from werkzeug.utils import secure_filename import", "name = request.form['name'] if name != \"\": tag = Tag.add(name) if tag: res", "type=int) response = {\"status\": 500, \"msg\": \"name is Null!\"} name = request.form['name'] if", "tag.name} response['tag'] = res response[\"status\"] = 200 else: response[\"msg\"] = \"tag has already", "response[\"msg\"] = \"branch has already exists!\" return make_response(json.dumps(response)) @api.route(\"/search/<string:keyword>\") def search(keyword): res =", "if name != \"\": tag = Tag.add(name) if tag: res = {\"id\": tag.id,", "filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS @api.route(\"/upload_image/\",methods=['POST']) def upload_image(): result = { \"success\" : 0,", "jsonify(result) if file and allowed_file(file.filename): filename = secure_filename(file.filename) save_path = os.path.join(base_dir,UPLOAD_FOLDER,filename) file.save(save_path) result[\"success\"]", "file.save(save_path) result[\"success\"] = 1 result[\"message\"] = \"Success\" result[\"url\"] = \"/\"+ UPLOAD_FOLDER + filename", "\"msg\": \"name is Null!\"} name = request.form['name'] if name != \"\": tag =", "request.files: result[\"message\"] = \"No file part\" return jsonify(result) file = request.files['editormd-image-file'] if file.filename", "\"name is Null!\"} name = request.form['name'] if name != \"\": tag = Tag.add(name)", "save_path = os.path.join(base_dir,UPLOAD_FOLDER,filename) file.save(save_path) result[\"success\"] = 1 result[\"message\"] = \"Success\" result[\"url\"] = \"/\"+", "{\"status\": 500, \"msg\": \"name is Null!\"} name = request.form['name'] parent_id = request.form['parent'] if", "Null!\"} name = request.form['name'] parent_id = request.form['parent'] if name != \"\": branch =", 
"make_response, request, jsonify from flask_login import login_required import json from werkzeug.utils import secure_filename", "else: response[\"msg\"] = \"branch has already exists!\" return make_response(json.dumps(response)) @api.route(\"/search/<string:keyword>\") def search(keyword): res", "from app import base_dir UPLOAD_FOLDER = 'static/resource/uploads/image/' ALLOWED_EXTENSIONS = set(['bmp', 'webp', 'png', 'jpg',", "from app.api import api from app.whoosh import search_helper from app import base_dir UPLOAD_FOLDER", "'gif']) @api.route(\"/add_tag/\", methods=['POST']) @login_required def add_tag(): # name = request.args.get('name', 0, type=int) response", "exists!\" return make_response(json.dumps(response)) @api.route(\"/add_branch/\", methods=['POST']) @login_required def add_branch(): response = {\"status\": 500, \"msg\":", "werkzeug.utils import secure_filename import os from app.models.fragment import Fragment from app.models.branch import Branch", "selected file\" return jsonify(result) if file and allowed_file(file.filename): filename = secure_filename(file.filename) save_path =", "= { \"success\" : 0, \"message\" : \"\", \"url\" : \"\" } if", "in request.files: result[\"message\"] = \"No file part\" return jsonify(result) file = request.files['editormd-image-file'] if", "and allowed_file(file.filename): filename = secure_filename(file.filename) save_path = os.path.join(base_dir,UPLOAD_FOLDER,filename) file.save(save_path) result[\"success\"] = 1 result[\"message\"]", "result[\"success\"] = 1 result[\"message\"] = \"Success\" result[\"url\"] = \"/\"+ UPLOAD_FOLDER + filename return", "\"message\" : \"\", \"url\" : \"\" } if request.method == \"POST\": print(request.files) if", "if file and allowed_file(file.filename): filename = secure_filename(file.filename) save_path = os.path.join(base_dir,UPLOAD_FOLDER,filename) file.save(save_path) result[\"success\"] =", "= 200 else: response[\"msg\"] = \"branch has already exists!\" return 
make_response(json.dumps(response)) @api.route(\"/search/<string:keyword>\") def", "os from app.models.fragment import Fragment from app.models.branch import Branch from app.models.tag import Tag", "response[\"msg\"] = \"tag has already exists!\" return make_response(json.dumps(response)) @api.route(\"/add_branch/\", methods=['POST']) @login_required def add_branch():", "Fragment from app.models.branch import Branch from app.models.tag import Tag from app.api import api", "\\ filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS @api.route(\"/upload_image/\",methods=['POST']) def upload_image(): result = { \"success\" :", "import os from app.models.fragment import Fragment from app.models.branch import Branch from app.models.tag import", "search(keyword): res = search_helper.search(keyword) data = {} data[\"result\"] = res return jsonify(data) def", "200 else: response[\"msg\"] = \"tag has already exists!\" return make_response(json.dumps(response)) @api.route(\"/add_branch/\", methods=['POST']) @login_required", "\"\": branch = Branch.add(name, parent_id=parent_id) if branch: res = {\"id\": branch.id, \"name\": branch.name}", "'png', 'jpg', 'jpeg', 'gif']) @api.route(\"/add_tag/\", methods=['POST']) @login_required def add_tag(): # name = request.args.get('name',", "file.filename == '': result[\"message\"] = \"No selected file\" return jsonify(result) if file and", "\"POST\": print(request.files) if 'editormd-image-file' not in request.files: result[\"message\"] = \"No file part\" return", "import Branch from app.models.tag import Tag from app.api import api from app.whoosh import", "methods=['POST']) @login_required def add_tag(): # name = request.args.get('name', 0, type=int) response = {\"status\":", "os.path.join(base_dir,UPLOAD_FOLDER,filename) file.save(save_path) result[\"success\"] = 1 result[\"message\"] = \"Success\" result[\"url\"] = \"/\"+ UPLOAD_FOLDER +", "has already exists!\" return make_response(json.dumps(response)) 
@api.route(\"/search/<string:keyword>\") def search(keyword): res = search_helper.search(keyword) data =", "allowed_file(file.filename): filename = secure_filename(file.filename) save_path = os.path.join(base_dir,UPLOAD_FOLDER,filename) file.save(save_path) result[\"success\"] = 1 result[\"message\"] =", "response['tag'] = res response[\"status\"] = 200 else: response[\"msg\"] = \"tag has already exists!\"", "data[\"result\"] = res return jsonify(data) def allowed_file(filename): return '.' in filename and \\", "res = {\"id\": tag.id, \"name\": tag.name} response['tag'] = res response[\"status\"] = 200 else:", "return jsonify(result) file = request.files['editormd-image-file'] if file.filename == '': result[\"message\"] = \"No selected", "login_required import json from werkzeug.utils import secure_filename import os from app.models.fragment import Fragment" ]
[ "& 0xFF, (address >> 8) & 0xFF]) return self.updi_phy.receive(1)[0] def ld16(self, address): \"\"\"", "OK\") return True self.logger.info(\"UPDI not OK - reinitialisation required\") return False def ldcs(self,", "self.stcs(constants.UPDI_CS_CTRLA, 1 << constants.UPDI_CTRLA_IBDLY_BIT) def check(self): \"\"\" Check UPDI by loading CS STATUSA", "self.check(): # Send double break if all is not well, and re-check self.updi_phy.send_double_break()", "key\") if len(key) != 8 << size: raise Exception(\"Invalid KEY length!\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_KEY", "self.logger.info(\"STCS 0x{0:02X} to 0x{1:02X}\".format(value, address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address & 0x0F), value]) def", "= mode def init(self): \"\"\" Set the inter-byte delay bit and disable collision", "- reinitialisation required\") return False def ldcs(self, address): \"\"\" Load data from Control/Status", "Control/Status space \"\"\" self.logger.info(\"LDCS from 0x{0:02X}\".format(address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address & 0x0F)]) response", "self.updi_phy.send_double_break() self.init() if not self.check(): raise Exception(\"UPDI initialisation failed\") def set_24bit_updi(self, mode): self.logger.info(\"Using", "response = self.updi_phy.receive(1) if len(response) != 1: # Todo - flag error return", "[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8) &", "collision detection \"\"\" self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT) self.stcs(constants.UPDI_CS_CTRLA, 1 << constants.UPDI_CTRLA_IBDLY_BIT) def check(self):", "from the pointer location with pointer post-increment \"\"\" self.logger.info(\"LD8 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD", "self.logger.info(\"LD16 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD 
| constants.UPDI_PTR_INC | constants.UPDI_DATA_16]) return self.updi_phy.receive(words << 1)", ">> 8) & 0xFF, (address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST", ">> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address", "# Send double break if all is not well, and re-check self.updi_phy.send_double_break() self.init()", "constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF, (address >>", "constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF]) return self.updi_phy.receive(2) def st(self,", "< len(data): self.updi_phy.send([data[n]]) response = self.updi_phy.receive(1) if len(response) != 1 or response[0] !=", "word value to the pointer location with pointer post-increment Disable acks when we", "self.init() # Check if not self.check(): # Send double break if all is", "\"\"\" Store data to the pointer location with pointer post-increment \"\"\" self.logger.info(\"ST8 to", "we do this, to reduce latency. 
\"\"\" self.logger.info(\"ST16 to *ptr++\") ctrla_ackon = 1", "len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") def st16(self,", "handles the UPDI data protocol within the device \"\"\" def __init__(self, comport, baud):", "[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8) &", "self.updi_phy.send([value & 0xFF, (value >> 8) & 0xFF]) response = self.updi_phy.receive(1) if len(response)", "a value to the repeat counter \"\"\" if (repeats - 1) > constants.UPDI_MAX_REPEAT_SIZE:", "self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8)", "<< size: raise Exception(\"Invalid KEY length!\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_KEY | constants.UPDI_KEY_KEY | size]) self.updi_phy.send(list(reversed(list(key))))", "raise Exception(\"UPDI initialisation failed\") def set_24bit_updi(self, mode): self.logger.info(\"Using 24-bit updi\") self.use24bit = mode", "constants.UPDI_PTR_INC | constants.UPDI_DATA_8, data[0]]) response = self.updi_phy.receive(1) if len(response) != 1 or response[0]", "= ctrla_ackon | (1 << constants.UPDI_CTRLA_RSD_BIT) # acks off. 
(RSD) # (Response signature", "protocol within the device \"\"\" def __init__(self, comport, baud): self.logger = logging.getLogger(\"link\") #", "[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) &", "a single byte value directly to a 16/24-bit address \"\"\" self.logger.info(\"ST to 0x{0:06X}\".format(address))", "8) & 0xFF]) return self.updi_phy.receive(2) def st(self, address, value): \"\"\" Store a single", "[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8) &", "repeats): \"\"\" Store a value to the repeat counter \"\"\" if (repeats -", "the pointer location with pointer post-increment Disable acks when we do this, to", "the pointer location \"\"\" self.logger.info(\"ST to ptr\") if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST |", "to the pointer location with pointer post-increment Disable acks when we do this,", "\"\"\" UPDI data link class handles the UPDI data protocol within the device", "address, value): \"\"\" Store a 16-bit word value directly to a 16/24-bit address", "connection self.use24bit=False self.updi_phy = UpdiPhysical(comport, baud) # Initialise self.init() # Check if not", "\"\"\" Read the SIB \"\"\" return self.updi_phy.sib() def key(self, size, key): \"\"\" Write", "the pointer location with pointer post-increment \"\"\" self.logger.info(\"ST8 to *ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST |", "if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") self.updi_phy.send([value", "constants.UPDI_DATA_16]) return self.updi_phy.receive(words << 1) def st_ptr(self, address): \"\"\" Set the pointer location", "0xFF, (address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS |", "UPDI physical connection self.use24bit=False 
self.updi_phy = UpdiPhysical(comport, baud) # Initialise self.init() # Check", "a 16/24-bit address \"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS |", "\"\"\" Load a 16-bit word directly from a 16/24-bit address \"\"\" self.logger.info(\"LD from", "acks enabled. ctrla_ackoff = ctrla_ackon | (1 << constants.UPDI_CTRLA_RSD_BIT) # acks off. (RSD)", "len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") def ld_ptr_inc(self,", "\"\"\" self.logger.info(\"ST8 to *ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_8, data[0]]) response =", ">> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address", "delay bit and disable collision detection \"\"\" self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT) self.stcs(constants.UPDI_CS_CTRLA, 1", "constants.UPDI_DATA_16] ) self.updi_phy.send(data) # No response expected. # Re-enable acks self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackon) def", "value to Control/Status space \"\"\" self.logger.info(\"STCS 0x{0:02X} to 0x{1:02X}\".format(value, address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS |", "[constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) &", "acks when we do this, to reduce latency. 
\"\"\" self.logger.info(\"ST16 to *ptr++\") ctrla_ackon", "address): \"\"\" Load data from Control/Status space \"\"\" self.logger.info(\"LDCS from 0x{0:02X}\".format(address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS", "OK - reinitialisation required\") return False def ldcs(self, address): \"\"\" Load data from", "| constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF, (address >> 16)", "constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF]) response = self.updi_phy.receive(1) if", "the pointer location with pointer post-increment \"\"\" self.logger.info(\"LD16 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD |", "\"\"\" Loads a number of bytes from the pointer location with pointer post-increment", "0xFF]) return self.updi_phy.receive(1)[0] def ld16(self, address): \"\"\" Load a 16-bit word directly from", "a 16-bit word value from the pointer location with pointer post-increment \"\"\" self.logger.info(\"LD16", "\"\"\" self.logger.info(\"Writing key\") if len(key) != 8 << size: raise Exception(\"Invalid KEY length!\")", "Create a UPDI physical connection self.use24bit=False self.updi_phy = UpdiPhysical(comport, baud) # Initialise self.init()", "raise Exception(\"Error with st\") def st16(self, address, value): \"\"\" Store a 16-bit word", "(address & 0x0F), value]) def ld(self, address): \"\"\" Load a single byte direct", "self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address & 0xFF, (address >>", "!= constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") self.updi_phy.send([value & 0xFF, (value >> 8) &", "with acks enabled. 
ctrla_ackoff = ctrla_ackon | (1 << constants.UPDI_CTRLA_RSD_BIT) # acks off.", "constants class UpdiDatalink(object): \"\"\" UPDI data link class handles the UPDI data protocol", "st\") self.updi_phy.send([value & 0xFF]) response = self.updi_phy.receive(1) if len(response) != 1 or response[0]", "address \"\"\" self.logger.info(\"ST to 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 |", "ld_ptr_inc(self, size): \"\"\" Loads a number of bytes from the pointer location with", "value): \"\"\" Store a 16-bit word value directly to a 16/24-bit address \"\"\"", "0xFF, (address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 |", "& 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF,", "data from Control/Status space \"\"\" self.logger.info(\"LDCS from 0x{0:02X}\".format(address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address &", "constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24, address & 0xFF, (address >> 8) & 0xFF,", "Control/Status space \"\"\" self.logger.info(\"STCS 0x{0:02X} to 0x{1:02X}\".format(value, address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address &", "\"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16,", "\"\"\" def __init__(self, comport, baud): self.logger = logging.getLogger(\"link\") # Create a UPDI physical", "import time from updi.physical import UpdiPhysical import updi.constants as constants class UpdiDatalink(object): \"\"\"", "if len(key) != 8 << size: raise Exception(\"Invalid KEY length!\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_KEY |", 
"response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") def st16(self, address, value): \"\"\" Store", "with st\") self.updi_phy.send([value & 0xFF]) response = self.updi_phy.receive(1) if len(response) != 1 or", "error return 0x00 return response[0] def stcs(self, address, value): \"\"\" Store a value", "data[0]]) response = self.updi_phy.receive(1) if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise", "0xFF, (address >> 8) & 0xFF]) response = self.updi_phy.receive(1) if len(response) != 1", "constants.UPDI_CTRLB_CCDETDIS_BIT) self.stcs(constants.UPDI_CS_CTRLA, 1 << constants.UPDI_CTRLA_IBDLY_BIT) def check(self): \"\"\" Check UPDI by loading CS", "Exception(\"Error with st\") self.updi_phy.send([value & 0xFF]) response = self.updi_phy.receive(1) if len(response) != 1", "location \"\"\" self.logger.info(\"ST to ptr\") if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS |", "1: # Todo - flag error return 0x00 return response[0] def stcs(self, address,", "| constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF]) return", "self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT) self.stcs(constants.UPDI_CS_CTRLA, 1 << constants.UPDI_CTRLA_IBDLY_BIT) def check(self): \"\"\" Check UPDI", "st_ptr\") def st_ptr_inc(self, data): \"\"\" Store data to the pointer location with pointer", "inter-byte delay bit and disable collision detection \"\"\" self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT) self.stcs(constants.UPDI_CS_CTRLA,", "set_24bit_updi(self, mode): self.logger.info(\"Using 24-bit updi\") self.use24bit = mode def init(self): \"\"\" Set the", "<reponame>leonerd/pyupdi \"\"\" Link layer in UPDI protocol stack \"\"\" import logging import time", "if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address & 
0xFF, (address", "# Todo - flag error return 0x00 return response[0] def stcs(self, address, value):", "protocol stack \"\"\" import logging import time from updi.physical import UpdiPhysical import updi.constants", "class handles the UPDI data protocol within the device \"\"\" def __init__(self, comport,", "if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address & 0xFF, (address", "with st_ptr\") def st_ptr_inc(self, data): \"\"\" Store data to the pointer location with", "8) & 0xFF]) return self.updi_phy.receive(1)[0] def ld16(self, address): \"\"\" Load a 16-bit word", "!= constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr\") def st_ptr_inc(self, data): \"\"\" Store data to", "physical connection self.use24bit=False self.updi_phy = UpdiPhysical(comport, baud) # Initialise self.init() # Check if", "the inter-byte delay bit and disable collision detection \"\"\" self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT)", "raise Exception(\"ACK error with st_ptr_inc\") n = 1 while n < len(data): self.updi_phy.send([data[n]])", "(address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16,", "& 0xFF]) return self.updi_phy.receive(1)[0] def ld16(self, address): \"\"\" Load a 16-bit word directly", "self.use24bit = mode def init(self): \"\"\" Set the inter-byte delay bit and disable", "key): \"\"\" Write a key \"\"\" self.logger.info(\"Writing key\") if len(key) != 8 <<", "= self.updi_phy.receive(1) if len(response) != 1: # Todo - flag error return 0x00", "constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF]) response = self.updi_phy.receive(1) if", "True self.logger.info(\"UPDI not OK - reinitialisation required\") return False def ldcs(self, address): \"\"\"", "0xFF]) def read_sib(self): \"\"\" Read the SIB \"\"\" return self.updi_phy.sib() def key(self, 
size,", "Exception(\"Invalid repeat count!\") self.logger.info(\"Repeat {0:d}\".format(repeats)) repeats -= 1 self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE, repeats", "| (address & 0x0F), value]) def ld(self, address): \"\"\" Load a single byte", "UpdiPhysical import updi.constants as constants class UpdiDatalink(object): \"\"\" UPDI data link class handles", "constants.UPDI_PTR_INC | constants.UPDI_DATA_16] ) self.updi_phy.send(data) # No response expected. # Re-enable acks self.stcs(constants.UPDI_CS_CTRLA,", "[constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24, address & 0xFF, (address >> 8) &", "constants.UPDI_CTRLA_IBDLY_BIT # with acks enabled. ctrla_ackoff = ctrla_ackon | (1 << constants.UPDI_CTRLA_RSD_BIT) #", "1 << constants.UPDI_CTRLA_IBDLY_BIT # with acks enabled. ctrla_ackoff = ctrla_ackon | (1 <<", "self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8)", "16/24-bit address \"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24", "1 self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE, repeats & 0xFF]) def read_sib(self): \"\"\" Read the", "0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF, (address", "(address >> 8) & 0xFF, (address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC,", "[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8) &", "if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr\") def", "st\") def st16(self, address, value): \"\"\" Store a 16-bit word 
value directly to", "Load a 16-bit word value from the pointer location with pointer post-increment \"\"\"", "constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF, (address >>", "(address >> 8) & 0xFF]) response = self.updi_phy.receive(1) if len(response) != 1 or", "with st\") def ld_ptr_inc(self, size): \"\"\" Loads a number of bytes from the", "self.updi_phy.receive(words << 1) def st_ptr(self, address): \"\"\" Set the pointer location \"\"\" self.logger.info(\"ST", "or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr_inc\") n += 1 def st_ptr_inc16(self,", "with pointer post-increment \"\"\" self.logger.info(\"ST8 to *ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_8,", "ld16(self, address): \"\"\" Load a 16-bit word directly from a 16/24-bit address \"\"\"", "expected. # Re-enable acks self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackon) def repeat(self, repeats): \"\"\" Store a value", "location with pointer post-increment Disable acks when we do this, to reduce latency.", "else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16, address & 0xFF, (address >>", "| constants.UPDI_PTR_INC | constants.UPDI_DATA_16]) return self.updi_phy.receive(words << 1) def st_ptr(self, address): \"\"\" Set", "| constants.UPDI_DATA_8, data[0]]) response = self.updi_phy.receive(1) if len(response) != 1 or response[0] !=", "to *ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_8, data[0]]) response = self.updi_phy.receive(1) if", "\"\"\" self.logger.info(\"STCS 0x{0:02X} to 0x{1:02X}\".format(value, address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address & 0x0F), value])", "constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF, (address >> 16) &", "len(response) != 1 or response[0] != 
constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr\") def st_ptr_inc(self,", "Link layer in UPDI protocol stack \"\"\" import logging import time from updi.physical", "or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr\") def st_ptr_inc(self, data): \"\"\" Store", "return self.updi_phy.sib() def key(self, size, key): \"\"\" Write a key \"\"\" self.logger.info(\"Writing key\")", "of bytes from the pointer location with pointer post-increment \"\"\" self.logger.info(\"LD8 from ptr++\")", "Store a single byte value directly to a 16/24-bit address \"\"\" self.logger.info(\"ST to", "constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF]) response =", "\"\"\" Store a single byte value directly to a 16/24-bit address \"\"\" self.logger.info(\"ST", "| constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF]) response = self.updi_phy.receive(1)", "\"\"\" self.logger.info(\"LDCS from 0x{0:02X}\".format(address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address & 0x0F)]) response = self.updi_phy.receive(1)", "ctrla_ackoff) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_16] ) self.updi_phy.send(data) # No response expected.", "= logging.getLogger(\"link\") # Create a UPDI physical connection self.use24bit=False self.updi_phy = UpdiPhysical(comport, baud)", "| constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF, (address", "\"\"\" Load a 16-bit word value from the pointer location with pointer post-increment", "| constants.UPDI_DATA_8]) return self.updi_phy.receive(size) def ld_ptr_inc16(self, words): \"\"\" Load a 16-bit word value", "if all is not well, and re-check self.updi_phy.send_double_break() self.init() if not self.check(): raise", "a 16-bit word value directly to a 16/24-bit address \"\"\" self.logger.info(\"ST to 0x{0:06X}\".format(address))", "a number of bytes 
from the pointer location with pointer post-increment \"\"\" self.logger.info(\"LD8", "raise Exception(\"Error with st\") self.updi_phy.send([value & 0xFF]) response = self.updi_phy.receive(1) if len(response) !=", "0x00 return response[0] def stcs(self, address, value): \"\"\" Store a value to Control/Status", "constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_8]) return self.updi_phy.receive(size) def ld_ptr_inc16(self, words): \"\"\" Load a", "[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) &", "from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_16]) return self.updi_phy.receive(words << 1) def", "self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE, repeats & 0xFF]) def read_sib(self): \"\"\" Read the SIB", "initialisation failed\") def set_24bit_updi(self, mode): self.logger.info(\"Using 24-bit updi\") self.use24bit = mode def init(self):", "self.logger.info(\"LDCS from 0x{0:02X}\".format(address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address & 0x0F)]) response = self.updi_phy.receive(1) if", "byte direct from a 16/24-bit address \"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send(", "a 16-bit word value to the pointer location with pointer post-increment Disable acks", "reduce latency. 
\"\"\" self.logger.info(\"ST16 to *ptr++\") ctrla_ackon = 1 << constants.UPDI_CTRLA_IBDLY_BIT # with", "self.init() if not self.check(): raise Exception(\"UPDI initialisation failed\") def set_24bit_updi(self, mode): self.logger.info(\"Using 24-bit", "st\") def ld_ptr_inc(self, size): \"\"\" Loads a number of bytes from the pointer", "st_ptr(self, address): \"\"\" Set the pointer location \"\"\" self.logger.info(\"ST to ptr\") if self.use24bit:", "if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address & 0xFF, (address", "reinitialisation required\") return False def ldcs(self, address): \"\"\" Load data from Control/Status space", "ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_16]) return self.updi_phy.receive(words << 1) def st_ptr(self,", "re-check self.updi_phy.send_double_break() self.init() if not self.check(): raise Exception(\"UPDI initialisation failed\") def set_24bit_updi(self, mode):", "\"\"\" self.logger.info(\"ST to 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16,", "mode def init(self): \"\"\" Set the inter-byte delay bit and disable collision detection", "!= constants.UPDI_PHY_ACK: raise Exception(\"ACK error with st_ptr_inc\") n = 1 while n <", "disable) self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackoff) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_16] ) self.updi_phy.send(data) # No", "Store a value to the repeat counter \"\"\" if (repeats - 1) >", "0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF, (address", "self.logger.info(\"ST to ptr\") if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, 
constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24, address", "(address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8,", ">> 8) & 0xFF]) response = self.updi_phy.receive(1) if len(response) != 1 or response[0]", "error with st_ptr_inc\") n = 1 while n < len(data): self.updi_phy.send([data[n]]) response =", "0x0F), value]) def ld(self, address): \"\"\" Load a single byte direct from a", "with pointer post-increment \"\"\" self.logger.info(\"LD16 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_16])", "response expected. # Re-enable acks self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackon) def repeat(self, repeats): \"\"\" Store a", "to *ptr++\") ctrla_ackon = 1 << constants.UPDI_CTRLA_IBDLY_BIT # with acks enabled. ctrla_ackoff =", ">> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address", "self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address & 0xFF, (address >>", "UpdiPhysical(comport, baud) # Initialise self.init() # Check if not self.check(): # Send double", "| constants.UPDI_REPEAT_BYTE, repeats & 0xFF]) def read_sib(self): \"\"\" Read the SIB \"\"\" return", "def ldcs(self, address): \"\"\" Load data from Control/Status space \"\"\" self.logger.info(\"LDCS from 0x{0:02X}\".format(address))", ">> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address", "address): \"\"\" Load a 16-bit word directly from a 16/24-bit address \"\"\" self.logger.info(\"LD", "!= constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") self.updi_phy.send([value & 0xFF]) response = self.updi_phy.receive(1) if", "def read_sib(self): \"\"\" 
Read the SIB \"\"\" return self.updi_phy.sib() def key(self, size, key):", "1) > constants.UPDI_MAX_REPEAT_SIZE: raise Exception(\"Invalid repeat count!\") self.logger.info(\"Repeat {0:d}\".format(repeats)) repeats -= 1 self.updi_phy.send([constants.UPDI_PHY_SYNC,", "(address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16,", "def ld_ptr_inc16(self, words): \"\"\" Load a 16-bit word value from the pointer location", "self.updi_phy.receive(1) if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr\")", "counter \"\"\" if (repeats - 1) > constants.UPDI_MAX_REPEAT_SIZE: raise Exception(\"Invalid repeat count!\") self.logger.info(\"Repeat", "constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") def ld_ptr_inc(self, size): \"\"\" Loads a number of", "pointer location with pointer post-increment \"\"\" self.logger.info(\"LD8 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC", "0xFF]) response = self.updi_phy.receive(1) if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise", "detection \"\"\" self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT) self.stcs(constants.UPDI_CS_CTRLA, 1 << constants.UPDI_CTRLA_IBDLY_BIT) def check(self): \"\"\"", "location with pointer post-increment \"\"\" self.logger.info(\"LD16 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC |", "> constants.UPDI_MAX_REPEAT_SIZE: raise Exception(\"Invalid repeat count!\") self.logger.info(\"Repeat {0:d}\".format(repeats)) repeats -= 1 self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT", "UPDI by loading CS STATUSA \"\"\" if self.ldcs(constants.UPDI_CS_STATUSA) != 0: self.logger.info(\"UPDI init OK\")", "Todo - flag error return 0x00 return response[0] def stcs(self, address, value): \"\"\"", "Loads a number of 
bytes from the pointer location with pointer post-increment \"\"\"", "self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address & 0x0F), value]) def ld(self, address): \"\"\" Load a", "16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address &", "pointer location \"\"\" self.logger.info(\"ST to ptr\") if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS", "post-increment Disable acks when we do this, to reduce latency. \"\"\" self.logger.info(\"ST16 to", "a 16-bit word directly from a 16/24-bit address \"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address)) if", "(Response signature disable) self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackoff) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_16] ) self.updi_phy.send(data)", "!= 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr_inc\") n += 1", "Store a value to Control/Status space \"\"\" self.logger.info(\"STCS 0x{0:02X} to 0x{1:02X}\".format(value, address)) self.updi_phy.send([constants.UPDI_PHY_SYNC,", "in UPDI protocol stack \"\"\" import logging import time from updi.physical import UpdiPhysical", "constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr_inc\") n += 1 def st_ptr_inc16(self, data): \"\"\" Store", "\"\"\" self.logger.info(\"LD16 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_16]) return self.updi_phy.receive(words <<", "UPDI data protocol within the device \"\"\" def __init__(self, comport, baud): self.logger =", "off. 
(RSD) # (Response signature disable) self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackoff) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC |", "<< constants.UPDI_CTRLA_IBDLY_BIT) def check(self): \"\"\" Check UPDI by loading CS STATUSA \"\"\" if", "with pointer post-increment \"\"\" self.logger.info(\"LD8 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_8])", "constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF]) return self.updi_phy.receive(2)", ">> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16, address", "!= 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") self.updi_phy.send([value & 0xFF])", "the SIB \"\"\" return self.updi_phy.sib() def key(self, size, key): \"\"\" Write a key", "!= 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr\") def st_ptr_inc(self, data):", "self.updi_phy.receive(1) if len(response) != 1: # Todo - flag error return 0x00 return", "the UPDI data protocol within the device \"\"\" def __init__(self, comport, baud): self.logger", "Exception(\"Error with st\") self.updi_phy.send([value & 0xFF, (value >> 8) & 0xFF]) response =", "response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") self.updi_phy.send([value & 0xFF, (value >> 8)", "(value >> 8) & 0xFF]) response = self.updi_phy.receive(1) if len(response) != 1 or", "data): \"\"\" Store data to the pointer location with pointer post-increment \"\"\" self.logger.info(\"ST8", "init(self): \"\"\" Set the inter-byte delay bit and disable collision detection \"\"\" self.stcs(constants.UPDI_CS_CTRLB,", "| constants.UPDI_DATA_24, address & 0xFF, (address >> 8) & 0xFF, (address >> 16)", "\"\"\" Write a key \"\"\" self.logger.info(\"Writing key\") if len(key) != 8 << size:", "to a 16/24-bit 
address \"\"\" self.logger.info(\"ST to 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS", "def st(self, address, value): \"\"\" Store a single byte value directly to a", "address & 0xFF, (address >> 8) & 0xFF]) return self.updi_phy.receive(1)[0] def ld16(self, address):", "(1 << constants.UPDI_CTRLA_RSD_BIT) # acks off. (RSD) # (Response signature disable) self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackoff)", "constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF]) response =", "self.updi_phy.receive(1) if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"ACK error with", "repeat counter \"\"\" if (repeats - 1) > constants.UPDI_MAX_REPEAT_SIZE: raise Exception(\"Invalid repeat count!\")", "1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"ACK error with st_ptr_inc\") n = 1", "constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF,", "SIB \"\"\" return self.updi_phy.sib() def key(self, size, key): \"\"\" Write a key \"\"\"", "constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF,", "self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_16]) return self.updi_phy.receive(words << 1) def st_ptr(self, address):", "self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackon) def repeat(self, repeats): \"\"\" Store a value to the repeat counter", "def stcs(self, address, value): \"\"\" Store a value to Control/Status space \"\"\" self.logger.info(\"STCS", "from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_8]) return self.updi_phy.receive(size) def ld_ptr_inc16(self, words):", "response = self.updi_phy.receive(1) if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"ACK", "| 
constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF, (address >> 16)", "(address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16,", "# No response expected. # Re-enable acks self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackon) def repeat(self, repeats): \"\"\"", "\"\"\" return self.updi_phy.sib() def key(self, size, key): \"\"\" Write a key \"\"\" self.logger.info(\"Writing", "{0:d}\".format(repeats)) repeats -= 1 self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE, repeats & 0xFF]) def read_sib(self):", "constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF,", "repeats & 0xFF]) def read_sib(self): \"\"\" Read the SIB \"\"\" return self.updi_phy.sib() def", "0xFF, (address >> 8) & 0xFF]) return self.updi_phy.receive(2) def st(self, address, value): \"\"\"", "constants.UPDI_REPEAT_BYTE, repeats & 0xFF]) def read_sib(self): \"\"\" Read the SIB \"\"\" return self.updi_phy.sib()", "UpdiDatalink(object): \"\"\" UPDI data link class handles the UPDI data protocol within the", "a value to Control/Status space \"\"\" self.logger.info(\"STCS 0x{0:02X} to 0x{1:02X}\".format(value, address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS", "import updi.constants as constants class UpdiDatalink(object): \"\"\" UPDI data link class handles the", "data): \"\"\" Store a 16-bit word value to the pointer location with pointer", "to 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address &", "byte value directly to a 16/24-bit address \"\"\" self.logger.info(\"ST to 0x{0:06X}\".format(address)) if self.use24bit:", "constants.UPDI_STCS | (address & 0x0F), value]) def ld(self, address): \"\"\" Load a single", "count!\") self.logger.info(\"Repeat 
{0:d}\".format(repeats)) repeats -= 1 self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE, repeats & 0xFF])", "1 def st_ptr_inc16(self, data): \"\"\" Store a 16-bit word value to the pointer", "& 0xFF]) def read_sib(self): \"\"\" Read the SIB \"\"\" return self.updi_phy.sib() def key(self,", "constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_16]) return self.updi_phy.receive(words << 1) def st_ptr(self, address): \"\"\"", "to 0x{1:02X}\".format(value, address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address & 0x0F), value]) def ld(self, address):", "when we do this, to reduce latency. \"\"\" self.logger.info(\"ST16 to *ptr++\") ctrla_ackon =", "\"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8,", "if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") def", "16-bit word value from the pointer location with pointer post-increment \"\"\" self.logger.info(\"LD16 from", "Send double break if all is not well, and re-check self.updi_phy.send_double_break() self.init() if", "0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address & 0xFF,", "self.logger = logging.getLogger(\"link\") # Create a UPDI physical connection self.use24bit=False self.updi_phy = UpdiPhysical(comport,", "0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF, (address", "constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE, repeats & 0xFF]) def read_sib(self): \"\"\" Read the SIB \"\"\"", "self.updi_phy.send(data) # No response expected. 
# Re-enable acks self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackon) def repeat(self, repeats):", "is not well, and re-check self.updi_phy.send_double_break() self.init() if not self.check(): raise Exception(\"UPDI initialisation", "n < len(data): self.updi_phy.send([data[n]]) response = self.updi_phy.receive(1) if len(response) != 1 or response[0]", "[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) &", "address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address & 0x0F), value]) def ld(self, address): \"\"\" Load", "*ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_8, data[0]]) response = self.updi_phy.receive(1) if len(response)", "| constants.UPDI_PTR_INC | constants.UPDI_DATA_16] ) self.updi_phy.send(data) # No response expected. # Re-enable acks", "self.logger.info(\"Using 24-bit updi\") self.use24bit = mode def init(self): \"\"\" Set the inter-byte delay", "self.logger.info(\"UPDI init OK\") return True self.logger.info(\"UPDI not OK - reinitialisation required\") return False", "1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") def st16(self, address, value):", "import UpdiPhysical import updi.constants as constants class UpdiDatalink(object): \"\"\" UPDI data link class", "and re-check self.updi_phy.send_double_break() self.init() if not self.check(): raise Exception(\"UPDI initialisation failed\") def set_24bit_updi(self,", "read_sib(self): \"\"\" Read the SIB \"\"\" return self.updi_phy.sib() def key(self, size, key): \"\"\"", "\"\"\" Store a 16-bit word value directly to a 16/24-bit address \"\"\" self.logger.info(\"ST", "-= 1 self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE, repeats & 0xFF]) def read_sib(self): \"\"\" Read", "# with acks enabled. 
ctrla_ackoff = ctrla_ackon | (1 << constants.UPDI_CTRLA_RSD_BIT) # acks", "0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address & 0xFF,", "single byte value directly to a 16/24-bit address \"\"\" self.logger.info(\"ST to 0x{0:06X}\".format(address)) if", "signature disable) self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackoff) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_16] ) self.updi_phy.send(data) #", "if not self.check(): # Send double break if all is not well, and", "return self.updi_phy.receive(1)[0] def ld16(self, address): \"\"\" Load a 16-bit word directly from a", "or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") self.updi_phy.send([value & 0xFF]) response =", "<< constants.UPDI_CTRLA_RSD_BIT) # acks off. (RSD) # (Response signature disable) self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackoff) self.updi_phy.send([constants.UPDI_PHY_SYNC,", "| constants.UPDI_DATA_16] ) self.updi_phy.send(data) # No response expected. # Re-enable acks self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackon)", "16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address &", "ctrla_ackon | (1 << constants.UPDI_CTRLA_RSD_BIT) # acks off. 
(RSD) # (Response signature disable)", "st_ptr_inc\") n += 1 def st_ptr_inc16(self, data): \"\"\" Store a 16-bit word value", "link class handles the UPDI data protocol within the device \"\"\" def __init__(self,", "1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr_inc\") n += 1 def", "[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) &", "\"\"\" if self.ldcs(constants.UPDI_CS_STATUSA) != 0: self.logger.info(\"UPDI init OK\") return True self.logger.info(\"UPDI not OK", "\"\"\" Load data from Control/Status space \"\"\" self.logger.info(\"LDCS from 0x{0:02X}\".format(address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS |", "Exception(\"Error with st_ptr\") def st_ptr_inc(self, data): \"\"\" Store data to the pointer location", "comport, baud): self.logger = logging.getLogger(\"link\") # Create a UPDI physical connection self.use24bit=False self.updi_phy", "constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF]) response =", "self.logger.info(\"LD8 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_8]) return self.updi_phy.receive(size) def ld_ptr_inc16(self,", "constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr\") def st_ptr_inc(self, data): \"\"\" Store data to the", "if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24, address & 0xFF, (address", "acks self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackon) def repeat(self, repeats): \"\"\" Store a value to the repeat", "& 0x0F), value]) def ld(self, address): \"\"\" Load a single byte direct from", "| constants.UPDI_DATA_16]) return self.updi_phy.receive(words << 1) def st_ptr(self, address): \"\"\" Set the pointer", "*ptr++\") ctrla_ackon = 1 << constants.UPDI_CTRLA_IBDLY_BIT # with acks 
enabled. ctrla_ackoff = ctrla_ackon", "16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16, address &", "st_ptr_inc(self, data): \"\"\" Store data to the pointer location with pointer post-increment \"\"\"", "or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") def ld_ptr_inc(self, size): \"\"\" Loads", "ld_ptr_inc16(self, words): \"\"\" Load a 16-bit word value from the pointer location with", "a key \"\"\" self.logger.info(\"Writing key\") if len(key) != 8 << size: raise Exception(\"Invalid", "Store a 16-bit word value directly to a 16/24-bit address \"\"\" self.logger.info(\"ST to", "& 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF,", "do this, to reduce latency. \"\"\" self.logger.info(\"ST16 to *ptr++\") ctrla_ackon = 1 <<", "1 << constants.UPDI_CTRLA_IBDLY_BIT) def check(self): \"\"\" Check UPDI by loading CS STATUSA \"\"\"", "ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_8]) return self.updi_phy.receive(size) def ld_ptr_inc16(self, words): \"\"\"", "def ld16(self, address): \"\"\" Load a 16-bit word directly from a 16/24-bit address", "st\") self.updi_phy.send([value & 0xFF, (value >> 8) & 0xFF]) response = self.updi_phy.receive(1) if", "ld(self, address): \"\"\" Load a single byte direct from a 16/24-bit address \"\"\"", "class UpdiDatalink(object): \"\"\" UPDI data link class handles the UPDI data protocol within", "# (Response signature disable) self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackoff) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_16] )", "raise Exception(\"Error with st\") self.updi_phy.send([value & 0xFF, (value >> 8) & 0xFF]) response", ">> 8) & 0xFF]) return self.updi_phy.receive(1)[0] def ld16(self, 
address): \"\"\" Load a 16-bit", "& 0xFF, (address >> 8) & 0xFF]) response = self.updi_phy.receive(1) if len(response) !=", "constants.UPDI_PTR_INC | constants.UPDI_DATA_16]) return self.updi_phy.receive(words << 1) def st_ptr(self, address): \"\"\" Set the", "| constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF]) return self.updi_phy.receive(1)[0] def", "data to the pointer location with pointer post-increment \"\"\" self.logger.info(\"ST8 to *ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC,", "constants.UPDI_DATA_8]) return self.updi_phy.receive(size) def ld_ptr_inc16(self, words): \"\"\" Load a 16-bit word value from", "len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"ACK error with st_ptr_inc\") n", "!= 8 << size: raise Exception(\"Invalid KEY length!\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_KEY | constants.UPDI_KEY_KEY |", "return self.updi_phy.receive(size) def ld_ptr_inc16(self, words): \"\"\" Load a 16-bit word value from the", "1 while n < len(data): self.updi_phy.send([data[n]]) response = self.updi_phy.receive(1) if len(response) != 1", "a UPDI physical connection self.use24bit=False self.updi_phy = UpdiPhysical(comport, baud) # Initialise self.init() #", "!= 0: self.logger.info(\"UPDI init OK\") return True self.logger.info(\"UPDI not OK - reinitialisation required\")", "CS STATUSA \"\"\" if self.ldcs(constants.UPDI_CS_STATUSA) != 0: self.logger.info(\"UPDI init OK\") return True self.logger.info(\"UPDI", "post-increment \"\"\" self.logger.info(\"ST8 to *ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_8, data[0]]) response", "as constants class UpdiDatalink(object): \"\"\" UPDI data link class handles the UPDI data", "self.updi_phy.send([data[n]]) response = self.updi_phy.receive(1) if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise", "len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: 
raise Exception(\"Error with st_ptr_inc\") n +=", "value to the pointer location with pointer post-increment Disable acks when we do", "self.logger.info(\"ST8 to *ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_8, data[0]]) response = self.updi_phy.receive(1)", "pointer post-increment \"\"\" self.logger.info(\"LD16 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_16]) return", "double break if all is not well, and re-check self.updi_phy.send_double_break() self.init() if not", "st_ptr_inc16(self, data): \"\"\" Store a 16-bit word value to the pointer location with", "def check(self): \"\"\" Check UPDI by loading CS STATUSA \"\"\" if self.ldcs(constants.UPDI_CS_STATUSA) !=", "& 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16, address & 0xFF,", "response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr_inc\") n += 1 def st_ptr_inc16(self, data):", "8) & 0xFF, (address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS |", "loading CS STATUSA \"\"\" if self.ldcs(constants.UPDI_CS_STATUSA) != 0: self.logger.info(\"UPDI init OK\") return True", "16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address &", "required\") return False def ldcs(self, address): \"\"\" Load data from Control/Status space \"\"\"", "disable collision detection \"\"\" self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT) self.stcs(constants.UPDI_CS_CTRLA, 1 << constants.UPDI_CTRLA_IBDLY_BIT) def", "from Control/Status space \"\"\" self.logger.info(\"LDCS from 0x{0:02X}\".format(address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address & 0x0F)])", "return 0x00 return response[0] def 
stcs(self, address, value): \"\"\" Store a value to", "time from updi.physical import UpdiPhysical import updi.constants as constants class UpdiDatalink(object): \"\"\" UPDI", "all is not well, and re-check self.updi_phy.send_double_break() self.init() if not self.check(): raise Exception(\"UPDI", "address, value): \"\"\" Store a single byte value directly to a 16/24-bit address", "(address >> 8) & 0xFF]) return self.updi_phy.receive(1)[0] def ld16(self, address): \"\"\" Load a", "constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_16] ) self.updi_phy.send(data) # No response expected. # Re-enable", "constants.UPDI_DATA_8, data[0]]) response = self.updi_phy.receive(1) if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:", "self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address & 0xFF, (address >>", "& 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF,", "constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF])", "<< constants.UPDI_CTRLB_CCDETDIS_BIT) self.stcs(constants.UPDI_CS_CTRLA, 1 << constants.UPDI_CTRLA_IBDLY_BIT) def check(self): \"\"\" Check UPDI by loading", "if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address & 0xFF, (address", "& 0xFF]) response = self.updi_phy.receive(1) if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:", "updi.physical import UpdiPhysical import updi.constants as constants class UpdiDatalink(object): \"\"\" UPDI data link", "response = self.updi_phy.receive(1) if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error", "a single byte direct from a 16/24-bit address \"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address)) if", "mode): 
self.logger.info(\"Using 24-bit updi\") self.use24bit = mode def init(self): \"\"\" Set the inter-byte", "bit and disable collision detection \"\"\" self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT) self.stcs(constants.UPDI_CS_CTRLA, 1 <<", "address, value): \"\"\" Store a value to Control/Status space \"\"\" self.logger.info(\"STCS 0x{0:02X} to", "| constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF]) response", "# Initialise self.init() # Check if not self.check(): # Send double break if", "Store data to the pointer location with pointer post-increment \"\"\" self.logger.info(\"ST8 to *ptr++\")", "if len(response) != 1: # Todo - flag error return 0x00 return response[0]", "the device \"\"\" def __init__(self, comport, baud): self.logger = logging.getLogger(\"link\") # Create a", "\"\"\" Store a value to the repeat counter \"\"\" if (repeats - 1)", "constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF])", "self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8)", "def ld_ptr_inc(self, size): \"\"\" Loads a number of bytes from the pointer location", "address): \"\"\" Set the pointer location \"\"\" self.logger.info(\"ST to ptr\") if self.use24bit: self.updi_phy.send(", "1) def st_ptr(self, address): \"\"\" Set the pointer location \"\"\" self.logger.info(\"ST to ptr\")", "Load data from Control/Status space \"\"\" self.logger.info(\"LDCS from 0x{0:02X}\".format(address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address", "single byte direct from a 16/24-bit address \"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address)) if self.use24bit:", "0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF, (address", 
"constants.UPDI_CTRLA_RSD_BIT) # acks off. (RSD) # (Response signature disable) self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackoff) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST", "self.logger.info(\"ST16 to *ptr++\") ctrla_ackon = 1 << constants.UPDI_CTRLA_IBDLY_BIT # with acks enabled. ctrla_ackoff", "def ld(self, address): \"\"\" Load a single byte direct from a 16/24-bit address", "def set_24bit_updi(self, mode): self.logger.info(\"Using 24-bit updi\") self.use24bit = mode def init(self): \"\"\" Set", "words): \"\"\" Load a 16-bit word value from the pointer location with pointer", "not self.check(): # Send double break if all is not well, and re-check", "def repeat(self, repeats): \"\"\" Store a value to the repeat counter \"\"\" if", "not self.check(): raise Exception(\"UPDI initialisation failed\") def set_24bit_updi(self, mode): self.logger.info(\"Using 24-bit updi\") self.use24bit", "key(self, size, key): \"\"\" Write a key \"\"\" self.logger.info(\"Writing key\") if len(key) !=", "0x{0:02X}\".format(address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address & 0x0F)]) response = self.updi_phy.receive(1) if len(response) !=", "stcs(self, address, value): \"\"\" Store a value to Control/Status space \"\"\" self.logger.info(\"STCS 0x{0:02X}", "constants.UPDI_PTR_INC | constants.UPDI_DATA_8]) return self.updi_phy.receive(size) def ld_ptr_inc16(self, words): \"\"\" Load a 16-bit word", "= self.updi_phy.receive(1) if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"ACK error", "self.logger.info(\"LD from 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address", "\"\"\" self.logger.info(\"ST to ptr\") if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24,", "response[0] != constants.UPDI_PHY_ACK: 
raise Exception(\"Error with st_ptr\") def st_ptr_inc(self, data): \"\"\" Store data", "!= 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") def ld_ptr_inc(self, size):", "while n < len(data): self.updi_phy.send([data[n]]) response = self.updi_phy.receive(1) if len(response) != 1 or", "False def ldcs(self, address): \"\"\" Load data from Control/Status space \"\"\" self.logger.info(\"LDCS from", "value): \"\"\" Store a single byte value directly to a 16/24-bit address \"\"\"", "from the pointer location with pointer post-increment \"\"\" self.logger.info(\"LD16 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD", "value to the repeat counter \"\"\" if (repeats - 1) > constants.UPDI_MAX_REPEAT_SIZE: raise", "else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF, (address >>", "return False def ldcs(self, address): \"\"\" Load data from Control/Status space \"\"\" self.logger.info(\"LDCS", "st16(self, address, value): \"\"\" Store a 16-bit word value directly to a 16/24-bit", "or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") def st16(self, address, value): \"\"\"", "__init__(self, comport, baud): self.logger = logging.getLogger(\"link\") # Create a UPDI physical connection self.use24bit=False", "self.updi_phy.receive(1) if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\")", "Re-enable acks self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackon) def repeat(self, repeats): \"\"\" Store a value to the", "| constants.UPDI_PTR_INC | constants.UPDI_DATA_8, data[0]]) response = self.updi_phy.receive(1) if len(response) != 1 or", "\"\"\" if (repeats - 1) > constants.UPDI_MAX_REPEAT_SIZE: raise Exception(\"Invalid repeat count!\") self.logger.info(\"Repeat {0:d}\".format(repeats))", "Exception(\"Error with st\") def st16(self, address, value): \"\"\" Store a 16-bit word 
value", "<< 1) def st_ptr(self, address): \"\"\" Set the pointer location \"\"\" self.logger.info(\"ST to", "Set the inter-byte delay bit and disable collision detection \"\"\" self.stcs(constants.UPDI_CS_CTRLB, 1 <<", "constants.UPDI_CTRLA_IBDLY_BIT) def check(self): \"\"\" Check UPDI by loading CS STATUSA \"\"\" if self.ldcs(constants.UPDI_CS_STATUSA)", "& 0xFF, (address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16", "by loading CS STATUSA \"\"\" if self.ldcs(constants.UPDI_CS_STATUSA) != 0: self.logger.info(\"UPDI init OK\") return", "8) & 0xFF]) response = self.updi_phy.receive(1) if len(response) != 1 or response[0] !=", "Read the SIB \"\"\" return self.updi_phy.sib() def key(self, size, key): \"\"\" Write a", "bytes from the pointer location with pointer post-increment \"\"\" self.logger.info(\"LD8 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC,", "& 0xFF, (address >> 8) & 0xFF]) return self.updi_phy.receive(2) def st(self, address, value):", "this, to reduce latency. \"\"\" self.logger.info(\"ST16 to *ptr++\") ctrla_ackon = 1 << constants.UPDI_CTRLA_IBDLY_BIT", "= 1 << constants.UPDI_CTRLA_IBDLY_BIT # with acks enabled. 
ctrla_ackoff = ctrla_ackon | (1", "Load a single byte direct from a 16/24-bit address \"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address))", "!= 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") self.updi_phy.send([value & 0xFF,", "1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") self.updi_phy.send([value & 0xFF]) response", "0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address & 0xFF,", "& 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF,", "with pointer post-increment Disable acks when we do this, to reduce latency. \"\"\"", "self.updi_phy.receive(1) if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr_inc\")", ") self.updi_phy.send(data) # No response expected. 
# Re-enable acks self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackon) def repeat(self,", "if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"ACK error with st_ptr_inc\")", "self.updi_phy.sib() def key(self, size, key): \"\"\" Write a key \"\"\" self.logger.info(\"Writing key\") if", "layer in UPDI protocol stack \"\"\" import logging import time from updi.physical import", "from 0x{0:02X}\".format(address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address & 0x0F)]) response = self.updi_phy.receive(1) if len(response)", "# Re-enable acks self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackon) def repeat(self, repeats): \"\"\" Store a value to", "constants.UPDI_PHY_ACK: raise Exception(\"ACK error with st_ptr_inc\") n = 1 while n < len(data):", "\"\"\" self.logger.info(\"LD8 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_8]) return self.updi_phy.receive(size) def", "0xFF, (address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 |", "8) & 0xFF, (address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS |", "16-bit word value directly to a 16/24-bit address \"\"\" self.logger.info(\"ST to 0x{0:06X}\".format(address)) if", "constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") self.updi_phy.send([value & 0xFF]) response = self.updi_phy.receive(1) if len(response)", "data protocol within the device \"\"\" def __init__(self, comport, baud): self.logger = logging.getLogger(\"link\")", "logging.getLogger(\"link\") # Create a UPDI physical connection self.use24bit=False self.updi_phy = UpdiPhysical(comport, baud) #", "baud) # Initialise self.init() # Check if not self.check(): # Send double break", "check(self): \"\"\" Check UPDI by loading CS STATUSA \"\"\" if self.ldcs(constants.UPDI_CS_STATUSA) != 0:", "(address >> 16) & 0xFF]) else: 
self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8,", "else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF, (address >>", "def st_ptr_inc(self, data): \"\"\" Store data to the pointer location with pointer post-increment", "8 << size: raise Exception(\"Invalid KEY length!\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_KEY | constants.UPDI_KEY_KEY | size])", "device \"\"\" def __init__(self, comport, baud): self.logger = logging.getLogger(\"link\") # Create a UPDI", "location with pointer post-increment \"\"\" self.logger.info(\"LD8 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC |", "value from the pointer location with pointer post-increment \"\"\" self.logger.info(\"LD16 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC,", "constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF])", "data link class handles the UPDI data protocol within the device \"\"\" def", "acks off. 
(RSD) # (Response signature disable) self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackoff) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC", "\"\"\" self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT) self.stcs(constants.UPDI_CS_CTRLA, 1 << constants.UPDI_CTRLA_IBDLY_BIT) def check(self): \"\"\" Check", "constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF]) return self.updi_phy.receive(1)[0] def ld16(self,", "& 0xFF, (value >> 8) & 0xFF]) response = self.updi_phy.receive(1) if len(response) !=", "pointer location with pointer post-increment \"\"\" self.logger.info(\"ST8 to *ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC", "pointer location with pointer post-increment Disable acks when we do this, to reduce", "directly to a 16/24-bit address \"\"\" self.logger.info(\"ST to 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC,", "| constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24, address & 0xFF, (address >> 8) & 0xFF, (address", "constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF, (address >> 16) &", "Write a key \"\"\" self.logger.info(\"Writing key\") if len(key) != 8 << size: raise", "self.updi_phy.receive(2) def st(self, address, value): \"\"\" Store a single byte value directly to", "self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8)", "updi\") self.use24bit = mode def init(self): \"\"\" Set the inter-byte delay bit and", "self.logger.info(\"ST to 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address", "\"\"\" Store a 16-bit word value to the pointer location with pointer post-increment", "(repeats - 1) > constants.UPDI_MAX_REPEAT_SIZE: raise Exception(\"Invalid repeat count!\") 
self.logger.info(\"Repeat {0:d}\".format(repeats)) repeats -=", "latency. \"\"\" self.logger.info(\"ST16 to *ptr++\") ctrla_ackon = 1 << constants.UPDI_CTRLA_IBDLY_BIT # with acks", "1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") self.updi_phy.send([value & 0xFF, (value", "return True self.logger.info(\"UPDI not OK - reinitialisation required\") return False def ldcs(self, address):", "response[0] != constants.UPDI_PHY_ACK: raise Exception(\"ACK error with st_ptr_inc\") n = 1 while n", "1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") def ld_ptr_inc(self, size): \"\"\"", "!= 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"ACK error with st_ptr_inc\") n =", "repeats -= 1 self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE, repeats & 0xFF]) def read_sib(self): \"\"\"", "location with pointer post-increment \"\"\" self.logger.info(\"ST8 to *ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC |", "to Control/Status space \"\"\" self.logger.info(\"STCS 0x{0:02X} to 0x{1:02X}\".format(value, address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address", "self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_8, data[0]]) response = self.updi_phy.receive(1) if len(response) !=", "16/24-bit address \"\"\" self.logger.info(\"ST to 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24", "space \"\"\" self.logger.info(\"STCS 0x{0:02X} to 0x{1:02X}\".format(value, address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address & 0x0F),", "size): \"\"\" Loads a number of bytes from the pointer location with pointer", "value]) def ld(self, address): \"\"\" Load a single byte direct from a 16/24-bit", "the pointer location with 
pointer post-increment \"\"\" self.logger.info(\"LD8 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD |", "stack \"\"\" import logging import time from updi.physical import UpdiPhysical import updi.constants as", "self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8)", "ptr\") if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24, address & 0xFF,", "Store a 16-bit word value to the pointer location with pointer post-increment Disable", "def key(self, size, key): \"\"\" Write a key \"\"\" self.logger.info(\"Writing key\") if len(key)", "= self.updi_phy.receive(1) if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with", "return response[0] def stcs(self, address, value): \"\"\" Store a value to Control/Status space", "Exception(\"ACK error with st_ptr_inc\") n = 1 while n < len(data): self.updi_phy.send([data[n]]) response", "Initialise self.init() # Check if not self.check(): # Send double break if all", "to reduce latency. \"\"\" self.logger.info(\"ST16 to *ptr++\") ctrla_ackon = 1 << constants.UPDI_CTRLA_IBDLY_BIT #", "pointer post-increment Disable acks when we do this, to reduce latency. \"\"\" self.logger.info(\"ST16", "raise Exception(\"Error with st_ptr\") def st_ptr_inc(self, data): \"\"\" Store data to the pointer", "| (address & 0x0F)]) response = self.updi_phy.receive(1) if len(response) != 1: # Todo", "enabled. ctrla_ackoff = ctrla_ackon | (1 << constants.UPDI_CTRLA_RSD_BIT) # acks off. 
(RSD) #", "self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24, address & 0xFF, (address >> 8)", "n = 1 while n < len(data): self.updi_phy.send([data[n]]) response = self.updi_phy.receive(1) if len(response)", "0: self.logger.info(\"UPDI init OK\") return True self.logger.info(\"UPDI not OK - reinitialisation required\") return", "0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF]) else: self.updi_phy.send(", "space \"\"\" self.logger.info(\"LDCS from 0x{0:02X}\".format(address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address & 0x0F)]) response =", "direct from a 16/24-bit address \"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC,", "with st\") self.updi_phy.send([value & 0xFF, (value >> 8) & 0xFF]) response = self.updi_phy.receive(1)", "n += 1 def st_ptr_inc16(self, data): \"\"\" Store a 16-bit word value to", "else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF, (address >>", "constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF,", "and disable collision detection \"\"\" self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT) self.stcs(constants.UPDI_CS_CTRLA, 1 << constants.UPDI_CTRLA_IBDLY_BIT)", "or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") self.updi_phy.send([value & 0xFF, (value >>", "self.check(): raise Exception(\"UPDI initialisation failed\") def set_24bit_updi(self, mode): self.logger.info(\"Using 24-bit updi\") self.use24bit =", "<< constants.UPDI_CTRLA_IBDLY_BIT # with acks enabled. 
ctrla_ackoff = ctrla_ackon | (1 << constants.UPDI_CTRLA_RSD_BIT)", "| constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF]) return", "Check UPDI by loading CS STATUSA \"\"\" if self.ldcs(constants.UPDI_CS_STATUSA) != 0: self.logger.info(\"UPDI init", "(address >> 8) & 0xFF]) return self.updi_phy.receive(2) def st(self, address, value): \"\"\" Store", "repeat count!\") self.logger.info(\"Repeat {0:d}\".format(repeats)) repeats -= 1 self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE, repeats &", "Set the pointer location \"\"\" self.logger.info(\"ST to ptr\") if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST", "# acks off. (RSD) # (Response signature disable) self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackoff) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST |", "+= 1 def st_ptr_inc16(self, data): \"\"\" Store a 16-bit word value to the", "post-increment \"\"\" self.logger.info(\"LD16 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_16]) return self.updi_phy.receive(words", "from 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address &", "directly from a 16/24-bit address \"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC,", "self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8)", "to the pointer location with pointer post-increment \"\"\" self.logger.info(\"ST8 to *ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST", "ctrla_ackon) def repeat(self, repeats): \"\"\" Store a value to the repeat counter \"\"\"", "!= constants.UPDI_PHY_ACK: raise Exception(\"Error with 
st\") def st16(self, address, value): \"\"\" Store a", "0xFF, (address >> 8) & 0xFF]) return self.updi_phy.receive(1)[0] def ld16(self, address): \"\"\" Load", "self.logger.info(\"Repeat {0:d}\".format(repeats)) repeats -= 1 self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE, repeats & 0xFF]) def", "self.ldcs(constants.UPDI_CS_STATUSA) != 0: self.logger.info(\"UPDI init OK\") return True self.logger.info(\"UPDI not OK - reinitialisation", "word value directly to a 16/24-bit address \"\"\" self.logger.info(\"ST to 0x{0:06X}\".format(address)) if self.use24bit:", "0xFF]) return self.updi_phy.receive(2) def st(self, address, value): \"\"\" Store a single byte value", "constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24, address & 0xFF, (address >> 8) & 0xFF, (address >>", "pointer post-increment \"\"\" self.logger.info(\"ST8 to *ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_8, data[0]])", "to the repeat counter \"\"\" if (repeats - 1) > constants.UPDI_MAX_REPEAT_SIZE: raise Exception(\"Invalid", "response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") def ld_ptr_inc(self, size): \"\"\" Loads a", "from 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address &", "constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF])", "the repeat counter \"\"\" if (repeats - 1) > constants.UPDI_MAX_REPEAT_SIZE: raise Exception(\"Invalid repeat", "UPDI protocol stack \"\"\" import logging import time from updi.physical import UpdiPhysical import", "def __init__(self, comport, baud): self.logger = logging.getLogger(\"link\") # Create a UPDI physical connection", "response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") self.updi_phy.send([value & 0xFF]) response = 
self.updi_phy.receive(1)", "a 16/24-bit address \"\"\" self.logger.info(\"ST to 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS |", "1 << constants.UPDI_CTRLB_CCDETDIS_BIT) self.stcs(constants.UPDI_CS_CTRLA, 1 << constants.UPDI_CTRLA_IBDLY_BIT) def check(self): \"\"\" Check UPDI by", "| constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF]) return self.updi_phy.receive(2) def", "return self.updi_phy.receive(words << 1) def st_ptr(self, address): \"\"\" Set the pointer location \"\"\"", "constants.UPDI_DATA_24, address & 0xFF, (address >> 8) & 0xFF, (address >> 16) &", "Exception(\"Error with st\") def ld_ptr_inc(self, size): \"\"\" Loads a number of bytes from", "constants.UPDI_MAX_REPEAT_SIZE: raise Exception(\"Invalid repeat count!\") self.logger.info(\"Repeat {0:d}\".format(repeats)) repeats -= 1 self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT |", "not well, and re-check self.updi_phy.send_double_break() self.init() if not self.check(): raise Exception(\"UPDI initialisation failed\")", "8) & 0xFF, (address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST |", "if (repeats - 1) > constants.UPDI_MAX_REPEAT_SIZE: raise Exception(\"Invalid repeat count!\") self.logger.info(\"Repeat {0:d}\".format(repeats)) repeats", "ldcs(self, address): \"\"\" Load data from Control/Status space \"\"\" self.logger.info(\"LDCS from 0x{0:02X}\".format(address)) self.updi_phy.send([constants.UPDI_PHY_SYNC,", "st(self, address, value): \"\"\" Store a single byte value directly to a 16/24-bit", "& 0xFF, (address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS", "constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_8, data[0]]) response = self.updi_phy.receive(1) if len(response) != 1", "self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | 
constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24, address & 0xFF, (address >>", "| constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF]) response", "\"\"\" self.logger.info(\"ST16 to *ptr++\") ctrla_ackon = 1 << constants.UPDI_CTRLA_IBDLY_BIT # with acks enabled.", ">> 8) & 0xFF, (address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS", "updi.constants as constants class UpdiDatalink(object): \"\"\" UPDI data link class handles the UPDI", "self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address & 0x0F)]) response = self.updi_phy.receive(1) if len(response) != 1:", "or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"ACK error with st_ptr_inc\") n = 1 while", "Disable acks when we do this, to reduce latency. \"\"\" self.logger.info(\"ST16 to *ptr++\")", "len(response) != 1: # Todo - flag error return 0x00 return response[0] def", "def st_ptr_inc16(self, data): \"\"\" Store a 16-bit word value to the pointer location", "self.updi_phy = UpdiPhysical(comport, baud) # Initialise self.init() # Check if not self.check(): #", "word value from the pointer location with pointer post-increment \"\"\" self.logger.info(\"LD16 from ptr++\")", "size, key): \"\"\" Write a key \"\"\" self.logger.info(\"Writing key\") if len(key) != 8", "with st_ptr_inc\") n += 1 def st_ptr_inc16(self, data): \"\"\" Store a 16-bit word", "1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr\") def st_ptr_inc(self, data): \"\"\"", "from updi.physical import UpdiPhysical import updi.constants as constants class UpdiDatalink(object): \"\"\" UPDI data", "| constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF]) response", "| constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF, (address", "!= constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr_inc\") n += 1 def st_ptr_inc16(self, data): 
\"\"\"", "key \"\"\" self.logger.info(\"Writing key\") if len(key) != 8 << size: raise Exception(\"Invalid KEY", "\"\"\" Link layer in UPDI protocol stack \"\"\" import logging import time from", "value): \"\"\" Store a value to Control/Status space \"\"\" self.logger.info(\"STCS 0x{0:02X} to 0x{1:02X}\".format(value,", "STATUSA \"\"\" if self.ldcs(constants.UPDI_CS_STATUSA) != 0: self.logger.info(\"UPDI init OK\") return True self.logger.info(\"UPDI not", "if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st_ptr_inc\") n", "import logging import time from updi.physical import UpdiPhysical import updi.constants as constants class", "len(response) != 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") self.updi_phy.send([value &", "well, and re-check self.updi_phy.send_double_break() self.init() if not self.check(): raise Exception(\"UPDI initialisation failed\") def", "self.logger.info(\"ST to 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address", "16-bit word value to the pointer location with pointer post-increment Disable acks when", "No response expected. # Re-enable acks self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackon) def repeat(self, repeats): \"\"\" Store", "Exception(\"UPDI initialisation failed\") def set_24bit_updi(self, mode): self.logger.info(\"Using 24-bit updi\") self.use24bit = mode def", "address & 0xFF, (address >> 8) & 0xFF]) response = self.updi_phy.receive(1) if len(response)", "ctrla_ackon = 1 << constants.UPDI_CTRLA_IBDLY_BIT # with acks enabled. 
ctrla_ackoff = ctrla_ackon |", "response[0] def stcs(self, address, value): \"\"\" Store a value to Control/Status space \"\"\"", "16-bit word directly from a 16/24-bit address \"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address)) if self.use24bit:", "def st16(self, address, value): \"\"\" Store a 16-bit word value directly to a", "len(data): self.updi_phy.send([data[n]]) response = self.updi_phy.receive(1) if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:", "ctrla_ackoff = ctrla_ackon | (1 << constants.UPDI_CTRLA_RSD_BIT) # acks off. (RSD) # (Response", "\"\"\" Set the inter-byte delay bit and disable collision detection \"\"\" self.stcs(constants.UPDI_CS_CTRLB, 1", "self.use24bit=False self.updi_phy = UpdiPhysical(comport, baud) # Initialise self.init() # Check if not self.check():", "0x{1:02X}\".format(value, address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address & 0x0F), value]) def ld(self, address): \"\"\"", "from a 16/24-bit address \"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS", "self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8)", "0xFF, (value >> 8) & 0xFF]) response = self.updi_phy.receive(1) if len(response) != 1", "pointer post-increment \"\"\" self.logger.info(\"LD8 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_8]) return", "post-increment \"\"\" self.logger.info(\"LD8 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_8]) return self.updi_phy.receive(size)", "constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") self.updi_phy.send([value & 0xFF, (value >> 8) & 0xFF])", "with st\") def st16(self, address, value): \"\"\" Store a 16-bit word value 
directly", "self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_16] ) self.updi_phy.send(data) # No response expected. #", "& 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF]) else:", "!= 1 or response[0] != constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") def st16(self, address,", "else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF, (address >>", "raise Exception(\"Error with st\") def ld_ptr_inc(self, size): \"\"\" Loads a number of bytes", "= 1 while n < len(data): self.updi_phy.send([data[n]]) response = self.updi_phy.receive(1) if len(response) !=", "st_ptr_inc\") n = 1 while n < len(data): self.updi_phy.send([data[n]]) response = self.updi_phy.receive(1) if", "0x0F)]) response = self.updi_phy.receive(1) if len(response) != 1: # Todo - flag error", "word directly from a 16/24-bit address \"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send(", "self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8)", "| constants.UPDI_PTR_INC | constants.UPDI_DATA_8]) return self.updi_phy.receive(size) def ld_ptr_inc16(self, words): \"\"\" Load a 16-bit", "# Check if not self.check(): # Send double break if all is not", "0x{0:02X} to 0x{1:02X}\".format(value, address)) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address & 0x0F), value]) def ld(self,", "\"\"\" self.logger.info(\"ST to 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8,", "0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16, address & 0xFF, (address", "& 0xFF]) return self.updi_phy.receive(2) def 
st(self, address, value): \"\"\" Store a single byte", "self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackoff) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_16] ) self.updi_phy.send(data) # No response", "\"\"\" import logging import time from updi.physical import UpdiPhysical import updi.constants as constants", "(RSD) # (Response signature disable) self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackoff) self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_16]", "constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16, address & 0xFF, (address >> 8) & 0xFF])", "Load a 16-bit word directly from a 16/24-bit address \"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address))", ">> 8) & 0xFF]) return self.updi_phy.receive(2) def st(self, address, value): \"\"\" Store a", "to 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address &", "address \"\"\" self.logger.info(\"LD from 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 |", "address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])", "self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16, address & 0xFF, (address >>", "Exception(\"Error with st_ptr_inc\") n += 1 def st_ptr_inc16(self, data): \"\"\" Store a 16-bit", "def init(self): \"\"\" Set the inter-byte delay bit and disable collision detection \"\"\"", "if not self.check(): raise Exception(\"UPDI initialisation failed\") def set_24bit_updi(self, mode): self.logger.info(\"Using 24-bit updi\")", "failed\") def set_24bit_updi(self, mode): self.logger.info(\"Using 24-bit updi\") self.use24bit = mode def init(self): \"\"\"", "logging import time from 
updi.physical import UpdiPhysical import updi.constants as constants class UpdiDatalink(object):", "return self.updi_phy.receive(2) def st(self, address, value): \"\"\" Store a single byte value directly", "16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address &", "- flag error return 0x00 return response[0] def stcs(self, address, value): \"\"\" Store", "baud): self.logger = logging.getLogger(\"link\") # Create a UPDI physical connection self.use24bit=False self.updi_phy =", "value directly to a 16/24-bit address \"\"\" self.logger.info(\"ST to 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send(", "raise Exception(\"Invalid repeat count!\") self.logger.info(\"Repeat {0:d}\".format(repeats)) repeats -= 1 self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE,", "within the device \"\"\" def __init__(self, comport, baud): self.logger = logging.getLogger(\"link\") # Create", "!= 1: # Todo - flag error return 0x00 return response[0] def stcs(self,", "\"\"\" Check UPDI by loading CS STATUSA \"\"\" if self.ldcs(constants.UPDI_CS_STATUSA) != 0: self.logger.info(\"UPDI", "constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF]) return self.updi_phy.receive(1)[0]", "len(key) != 8 << size: raise Exception(\"Invalid KEY length!\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_KEY | constants.UPDI_KEY_KEY", "& 0x0F)]) response = self.updi_phy.receive(1) if len(response) != 1: # Todo - flag", "self.logger.info(\"Writing key\") if len(key) != 8 << size: raise Exception(\"Invalid KEY length!\") self.updi_phy.send([constants.UPDI_PHY_SYNC,", "self.updi_phy.send([value & 0xFF]) response = self.updi_phy.receive(1) if len(response) != 1 or response[0] !=", "self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC | constants.UPDI_DATA_8]) return 
self.updi_phy.receive(size) def ld_ptr_inc16(self, words): \"\"\" Load", "init OK\") return True self.logger.info(\"UPDI not OK - reinitialisation required\") return False def", "raise Exception(\"Error with st_ptr_inc\") n += 1 def st_ptr_inc16(self, data): \"\"\" Store a", "- 1) > constants.UPDI_MAX_REPEAT_SIZE: raise Exception(\"Invalid repeat count!\") self.logger.info(\"Repeat {0:d}\".format(repeats)) repeats -= 1", "# Create a UPDI physical connection self.use24bit=False self.updi_phy = UpdiPhysical(comport, baud) # Initialise", "(address & 0x0F)]) response = self.updi_phy.receive(1) if len(response) != 1: # Todo -", "0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address & 0xFF,", "self.updi_phy.receive(size) def ld_ptr_inc16(self, words): \"\"\" Load a 16-bit word value from the pointer", "constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") def st16(self, address, value): \"\"\" Store a 16-bit", "!= constants.UPDI_PHY_ACK: raise Exception(\"Error with st\") def ld_ptr_inc(self, size): \"\"\" Loads a number", "\"\"\" Store a value to Control/Status space \"\"\" self.logger.info(\"STCS 0x{0:02X} to 0x{1:02X}\".format(value, address))", "with st_ptr_inc\") n = 1 while n < len(data): self.updi_phy.send([data[n]]) response = self.updi_phy.receive(1)", "repeat(self, repeats): \"\"\" Store a value to the repeat counter \"\"\" if (repeats", "self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16, address & 0xFF, (address >> 8)", ">> 8) & 0xFF, (address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS", "to ptr\") if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24, address &", "address): \"\"\" Load a single byte direct from a 16/24-bit address \"\"\" 
self.logger.info(\"LD", "self.logger.info(\"UPDI not OK - reinitialisation required\") return False def ldcs(self, address): \"\"\" Load", "pointer location with pointer post-increment \"\"\" self.logger.info(\"LD16 from ptr++\") self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC", "flag error return 0x00 return response[0] def stcs(self, address, value): \"\"\" Store a", "& 0xFF, (address >> 16) & 0xFF]) else: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16", "\"\"\" Load a single byte direct from a 16/24-bit address \"\"\" self.logger.info(\"LD from", "constants.UPDI_LDCS | (address & 0x0F)]) response = self.updi_phy.receive(1) if len(response) != 1: #", "if self.ldcs(constants.UPDI_CS_STATUSA) != 0: self.logger.info(\"UPDI init OK\") return True self.logger.info(\"UPDI not OK -", "address & 0xFF, (address >> 8) & 0xFF]) return self.updi_phy.receive(2) def st(self, address,", "= UpdiPhysical(comport, baud) # Initialise self.init() # Check if not self.check(): # Send", "UPDI data link class handles the UPDI data protocol within the device \"\"\"", "number of bytes from the pointer location with pointer post-increment \"\"\" self.logger.info(\"LD8 from", "| (1 << constants.UPDI_CTRLA_RSD_BIT) # acks off. 
(RSD) # (Response signature disable) self.stcs(constants.UPDI_CS_CTRLA,", "not OK - reinitialisation required\") return False def ldcs(self, address): \"\"\" Load data", "self.logger.info(\"LD from 0x{0:06X}\".format(address)) if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8, address", "\"\"\" Set the pointer location \"\"\" self.logger.info(\"ST to ptr\") if self.use24bit: self.updi_phy.send( [constants.UPDI_PHY_SYNC,", "24-bit updi\") self.use24bit = mode def init(self): \"\"\" Set the inter-byte delay bit", "| constants.UPDI_DATA_8, address & 0xFF, (address >> 8) & 0xFF]) response = self.updi_phy.receive(1)", "Check if not self.check(): # Send double break if all is not well,", "def st_ptr(self, address): \"\"\" Set the pointer location \"\"\" self.logger.info(\"ST to ptr\") if", "break if all is not well, and re-check self.updi_phy.send_double_break() self.init() if not self.check():", "self.updi_phy.receive(1)[0] def ld16(self, address): \"\"\" Load a 16-bit word directly from a 16/24-bit" ]
[ "if proc.wait() != 0: print(\"WARNING: Process with tid {} exit with code {}!\".format(", "= json.load(json_file) for i in range(mpi_rank, len(processes), parallel_workers): cmd = processes[i]['cmd'] tid =", "open(stderr_path, 'wb') proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle) if proc.wait() != 0: print(\"WARNING: Process", "open(stdout_path, 'wb') stderr_handle = open(stderr_path, 'wb') proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle) if proc.wait()", "with code {}!\".format( tid, proc.poll())) stdout_handle.close() stderr_handle.close() # Does ptf run it so", "stderr_handle = open(stderr_path, 'wb') proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle) if proc.wait() != 0:", "exit with code {}!\".format( tid, proc.poll())) stdout_handle.close() stderr_handle.close() # Does ptf run it", "<filename>lib/launch_scripts/ARCHER/ptf_worker.py #!/usr/bin/env python2 # Needs to run with ARCHER's Python version. \"\"\" Task-farm", "python2 # Needs to run with ARCHER's Python version. \"\"\" Task-farm worker script.", "mpi_rank < 0 or mpi_rank >= parallel_workers: raise Exception(\"Unexpected rank! {}\".format(mpi_rank)) with open(json_path,", "\"\"\" Task-farm worker script. 
\"\"\" import sys import json import subprocess def main(argv):", "json_path = argv[1] parallel_workers = int(argv[2]) mpi_rank = int(argv[3]) - 1 if mpi_rank", "= int(argv[2]) mpi_rank = int(argv[3]) - 1 if mpi_rank < 0 or mpi_rank", "def main(argv): json_path = argv[1] parallel_workers = int(argv[2]) mpi_rank = int(argv[3]) - 1", "tid = processes[i]['tid'] stdout_path = processes[i]['stdout'] stderr_path = processes[i]['stderr'] stdout_handle = open(stdout_path, 'wb')", "open(json_path, 'r') as json_file: processes = json.load(json_file) for i in range(mpi_rank, len(processes), parallel_workers):", "import json import subprocess def main(argv): json_path = argv[1] parallel_workers = int(argv[2]) mpi_rank", "= processes[i]['stdout'] stderr_path = processes[i]['stderr'] stdout_handle = open(stdout_path, 'wb') stderr_handle = open(stderr_path, 'wb')", "parallel_workers: raise Exception(\"Unexpected rank! {}\".format(mpi_rank)) with open(json_path, 'r') as json_file: processes = json.load(json_file)", "json_file: processes = json.load(json_file) for i in range(mpi_rank, len(processes), parallel_workers): cmd = processes[i]['cmd']", "argv[1] parallel_workers = int(argv[2]) mpi_rank = int(argv[3]) - 1 if mpi_rank < 0", "proc.wait() != 0: print(\"WARNING: Process with tid {} exit with code {}!\".format( tid,", "< 0 or mpi_rank >= parallel_workers: raise Exception(\"Unexpected rank! {}\".format(mpi_rank)) with open(json_path, 'r')", "= subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle) if proc.wait() != 0: print(\"WARNING: Process with tid {}", "= processes[i]['stderr'] stdout_handle = open(stdout_path, 'wb') stderr_handle = open(stderr_path, 'wb') proc = subprocess.Popen(cmd,", "worker script. \"\"\" import sys import json import subprocess def main(argv): json_path =", "script. \"\"\" import sys import json import subprocess def main(argv): json_path = argv[1]", ">= parallel_workers: raise Exception(\"Unexpected rank! 
{}\".format(mpi_rank)) with open(json_path, 'r') as json_file: processes =", "rank! {}\".format(mpi_rank)) with open(json_path, 'r') as json_file: processes = json.load(json_file) for i in", "0 or mpi_rank >= parallel_workers: raise Exception(\"Unexpected rank! {}\".format(mpi_rank)) with open(json_path, 'r') as", "range(mpi_rank, len(processes), parallel_workers): cmd = processes[i]['cmd'] tid = processes[i]['tid'] stdout_path = processes[i]['stdout'] stderr_path", "processes[i]['stderr'] stdout_handle = open(stdout_path, 'wb') stderr_handle = open(stderr_path, 'wb') proc = subprocess.Popen(cmd, stdout=stdout_handle,", "Process with tid {} exit with code {}!\".format( tid, proc.poll())) stdout_handle.close() stderr_handle.close() #", "proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle) if proc.wait() != 0: print(\"WARNING: Process with tid", "ARCHER's Python version. \"\"\" Task-farm worker script. \"\"\" import sys import json import", "subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle) if proc.wait() != 0: print(\"WARNING: Process with tid {} exit", "tid, proc.poll())) stdout_handle.close() stderr_handle.close() # Does ptf run it so that __name__ ==", "version. \"\"\" Task-farm worker script. \"\"\" import sys import json import subprocess def", "stdout_handle = open(stdout_path, 'wb') stderr_handle = open(stderr_path, 'wb') proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle)", "proc.poll())) stdout_handle.close() stderr_handle.close() # Does ptf run it so that __name__ == \"__main__\"", "= processes[i]['cmd'] tid = processes[i]['tid'] stdout_path = processes[i]['stdout'] stderr_path = processes[i]['stderr'] stdout_handle =", "sys import json import subprocess def main(argv): json_path = argv[1] parallel_workers = int(argv[2])", "or mpi_rank >= parallel_workers: raise Exception(\"Unexpected rank! 
{}\".format(mpi_rank)) with open(json_path, 'r') as json_file:", "stderr_path = processes[i]['stderr'] stdout_handle = open(stdout_path, 'wb') stderr_handle = open(stderr_path, 'wb') proc =", "{} exit with code {}!\".format( tid, proc.poll())) stdout_handle.close() stderr_handle.close() # Does ptf run", "with open(json_path, 'r') as json_file: processes = json.load(json_file) for i in range(mpi_rank, len(processes),", "code {}!\".format( tid, proc.poll())) stdout_handle.close() stderr_handle.close() # Does ptf run it so that", "len(processes), parallel_workers): cmd = processes[i]['cmd'] tid = processes[i]['tid'] stdout_path = processes[i]['stdout'] stderr_path =", "parallel_workers): cmd = processes[i]['cmd'] tid = processes[i]['tid'] stdout_path = processes[i]['stdout'] stderr_path = processes[i]['stderr']", "cmd = processes[i]['cmd'] tid = processes[i]['tid'] stdout_path = processes[i]['stdout'] stderr_path = processes[i]['stderr'] stdout_handle", "mpi_rank = int(argv[3]) - 1 if mpi_rank < 0 or mpi_rank >= parallel_workers:", "in range(mpi_rank, len(processes), parallel_workers): cmd = processes[i]['cmd'] tid = processes[i]['tid'] stdout_path = processes[i]['stdout']", "0: print(\"WARNING: Process with tid {} exit with code {}!\".format( tid, proc.poll())) stdout_handle.close()", "processes[i]['cmd'] tid = processes[i]['tid'] stdout_path = processes[i]['stdout'] stderr_path = processes[i]['stderr'] stdout_handle = open(stdout_path,", "int(argv[3]) - 1 if mpi_rank < 0 or mpi_rank >= parallel_workers: raise Exception(\"Unexpected", "= processes[i]['tid'] stdout_path = processes[i]['stdout'] stderr_path = processes[i]['stderr'] stdout_handle = open(stdout_path, 'wb') stderr_handle", "import sys import json import subprocess def main(argv): json_path = argv[1] parallel_workers =", "for i in range(mpi_rank, len(processes), parallel_workers): cmd = processes[i]['cmd'] tid = processes[i]['tid'] stdout_path", "Needs to run with ARCHER's Python version. 
\"\"\" Task-farm worker script. \"\"\" import", "tid {} exit with code {}!\".format( tid, proc.poll())) stdout_handle.close() stderr_handle.close() # Does ptf", "subprocess def main(argv): json_path = argv[1] parallel_workers = int(argv[2]) mpi_rank = int(argv[3]) -", "\"\"\" import sys import json import subprocess def main(argv): json_path = argv[1] parallel_workers", "stdout_handle.close() stderr_handle.close() # Does ptf run it so that __name__ == \"__main__\" ??", "i in range(mpi_rank, len(processes), parallel_workers): cmd = processes[i]['cmd'] tid = processes[i]['tid'] stdout_path =", "json import subprocess def main(argv): json_path = argv[1] parallel_workers = int(argv[2]) mpi_rank =", "# Needs to run with ARCHER's Python version. \"\"\" Task-farm worker script. \"\"\"", "parallel_workers = int(argv[2]) mpi_rank = int(argv[3]) - 1 if mpi_rank < 0 or", "processes[i]['stdout'] stderr_path = processes[i]['stderr'] stdout_handle = open(stdout_path, 'wb') stderr_handle = open(stderr_path, 'wb') proc", "{}\".format(mpi_rank)) with open(json_path, 'r') as json_file: processes = json.load(json_file) for i in range(mpi_rank,", "raise Exception(\"Unexpected rank! {}\".format(mpi_rank)) with open(json_path, 'r') as json_file: processes = json.load(json_file) for", "import subprocess def main(argv): json_path = argv[1] parallel_workers = int(argv[2]) mpi_rank = int(argv[3])", "stdout_path = processes[i]['stdout'] stderr_path = processes[i]['stderr'] stdout_handle = open(stdout_path, 'wb') stderr_handle = open(stderr_path,", "with tid {} exit with code {}!\".format( tid, proc.poll())) stdout_handle.close() stderr_handle.close() # Does", "= int(argv[3]) - 1 if mpi_rank < 0 or mpi_rank >= parallel_workers: raise", "#!/usr/bin/env python2 # Needs to run with ARCHER's Python version. \"\"\" Task-farm worker", "stderr_handle.close() # Does ptf run it so that __name__ == \"__main__\" ?? 
sys.exit(main(sys.argv))", "if mpi_rank < 0 or mpi_rank >= parallel_workers: raise Exception(\"Unexpected rank! {}\".format(mpi_rank)) with", "json.load(json_file) for i in range(mpi_rank, len(processes), parallel_workers): cmd = processes[i]['cmd'] tid = processes[i]['tid']", "stdout=stdout_handle, stderr=stderr_handle) if proc.wait() != 0: print(\"WARNING: Process with tid {} exit with", "{}!\".format( tid, proc.poll())) stdout_handle.close() stderr_handle.close() # Does ptf run it so that __name__", "'r') as json_file: processes = json.load(json_file) for i in range(mpi_rank, len(processes), parallel_workers): cmd", "int(argv[2]) mpi_rank = int(argv[3]) - 1 if mpi_rank < 0 or mpi_rank >=", "= open(stdout_path, 'wb') stderr_handle = open(stderr_path, 'wb') proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle) if", "with ARCHER's Python version. \"\"\" Task-farm worker script. \"\"\" import sys import json", "'wb') proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle) if proc.wait() != 0: print(\"WARNING: Process with", "!= 0: print(\"WARNING: Process with tid {} exit with code {}!\".format( tid, proc.poll()))", "'wb') stderr_handle = open(stderr_path, 'wb') proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle) if proc.wait() !=", "stderr=stderr_handle) if proc.wait() != 0: print(\"WARNING: Process with tid {} exit with code", "print(\"WARNING: Process with tid {} exit with code {}!\".format( tid, proc.poll())) stdout_handle.close() stderr_handle.close()", "Task-farm worker script. \"\"\" import sys import json import subprocess def main(argv): json_path", "1 if mpi_rank < 0 or mpi_rank >= parallel_workers: raise Exception(\"Unexpected rank! {}\".format(mpi_rank))", "as json_file: processes = json.load(json_file) for i in range(mpi_rank, len(processes), parallel_workers): cmd =", "mpi_rank >= parallel_workers: raise Exception(\"Unexpected rank! 
{}\".format(mpi_rank)) with open(json_path, 'r') as json_file: processes", "main(argv): json_path = argv[1] parallel_workers = int(argv[2]) mpi_rank = int(argv[3]) - 1 if", "Exception(\"Unexpected rank! {}\".format(mpi_rank)) with open(json_path, 'r') as json_file: processes = json.load(json_file) for i", "= open(stderr_path, 'wb') proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle) if proc.wait() != 0: print(\"WARNING:", "run with ARCHER's Python version. \"\"\" Task-farm worker script. \"\"\" import sys import", "processes = json.load(json_file) for i in range(mpi_rank, len(processes), parallel_workers): cmd = processes[i]['cmd'] tid", "to run with ARCHER's Python version. \"\"\" Task-farm worker script. \"\"\" import sys", "- 1 if mpi_rank < 0 or mpi_rank >= parallel_workers: raise Exception(\"Unexpected rank!", "Python version. \"\"\" Task-farm worker script. \"\"\" import sys import json import subprocess", "processes[i]['tid'] stdout_path = processes[i]['stdout'] stderr_path = processes[i]['stderr'] stdout_handle = open(stdout_path, 'wb') stderr_handle =", "= argv[1] parallel_workers = int(argv[2]) mpi_rank = int(argv[3]) - 1 if mpi_rank <" ]
[ "= cap.read() if ret == 0: break frame = cv2.flip(frame, 1) cv2.imshow(\"LiveCam\", frame)", "cv2.VideoCapture(0, cv2.CAP_DSHOW) print(\"weigth:\"+str(cap.get(3))) print(\"height:\"+str(cap.get(4))) cap.set(3, 800) cap.set(4, 720) print(\"weigth*:\"+str(cap.get(3))) print(\"height*:\"+str(cap.get(4))) while True: ret,", "cap.read() if ret == 0: break frame = cv2.flip(frame, 1) cv2.imshow(\"LiveCam\", frame) if", "== 0: break frame = cv2.flip(frame, 1) cv2.imshow(\"LiveCam\", frame) if cv2.waitKey(1) & 0xFF==ord(\"q\"):break", "0: break frame = cv2.flip(frame, 1) cv2.imshow(\"LiveCam\", frame) if cv2.waitKey(1) & 0xFF==ord(\"q\"):break cap.release()", "cap.set(4, 720) print(\"weigth*:\"+str(cap.get(3))) print(\"height*:\"+str(cap.get(4))) while True: ret, frame = cap.read() if ret ==", "while True: ret, frame = cap.read() if ret == 0: break frame =", "if ret == 0: break frame = cv2.flip(frame, 1) cv2.imshow(\"LiveCam\", frame) if cv2.waitKey(1)", "720) print(\"weigth*:\"+str(cap.get(3))) print(\"height*:\"+str(cap.get(4))) while True: ret, frame = cap.read() if ret == 0:", "800) cap.set(4, 720) print(\"weigth*:\"+str(cap.get(3))) print(\"height*:\"+str(cap.get(4))) while True: ret, frame = cap.read() if ret", "break frame = cv2.flip(frame, 1) cv2.imshow(\"LiveCam\", frame) if cv2.waitKey(1) & 0xFF==ord(\"q\"):break cap.release() cv2.destroyAllWindows()", "frame = cap.read() if ret == 0: break frame = cv2.flip(frame, 1) cv2.imshow(\"LiveCam\",", "import cv2 cv2.namedWindow(\"LiveCam\") cap = cv2.VideoCapture(0, cv2.CAP_DSHOW) print(\"weigth:\"+str(cap.get(3))) print(\"height:\"+str(cap.get(4))) cap.set(3, 800) cap.set(4, 720)", "ret == 0: break frame = cv2.flip(frame, 1) cv2.imshow(\"LiveCam\", frame) if cv2.waitKey(1) &", "cap.set(3, 800) cap.set(4, 720) print(\"weigth*:\"+str(cap.get(3))) print(\"height*:\"+str(cap.get(4))) while True: ret, frame = cap.read() if", "print(\"weigth:\"+str(cap.get(3))) print(\"height:\"+str(cap.get(4))) cap.set(3, 800) cap.set(4, 
720) print(\"weigth*:\"+str(cap.get(3))) print(\"height*:\"+str(cap.get(4))) while True: ret, frame =", "cv2.namedWindow(\"LiveCam\") cap = cv2.VideoCapture(0, cv2.CAP_DSHOW) print(\"weigth:\"+str(cap.get(3))) print(\"height:\"+str(cap.get(4))) cap.set(3, 800) cap.set(4, 720) print(\"weigth*:\"+str(cap.get(3))) print(\"height*:\"+str(cap.get(4)))", "True: ret, frame = cap.read() if ret == 0: break frame = cv2.flip(frame,", "ret, frame = cap.read() if ret == 0: break frame = cv2.flip(frame, 1)", "print(\"weigth*:\"+str(cap.get(3))) print(\"height*:\"+str(cap.get(4))) while True: ret, frame = cap.read() if ret == 0: break", "cv2 cv2.namedWindow(\"LiveCam\") cap = cv2.VideoCapture(0, cv2.CAP_DSHOW) print(\"weigth:\"+str(cap.get(3))) print(\"height:\"+str(cap.get(4))) cap.set(3, 800) cap.set(4, 720) print(\"weigth*:\"+str(cap.get(3)))", "print(\"height*:\"+str(cap.get(4))) while True: ret, frame = cap.read() if ret == 0: break frame", "<reponame>REISOGLU53/OpenCV-Python import cv2 cv2.namedWindow(\"LiveCam\") cap = cv2.VideoCapture(0, cv2.CAP_DSHOW) print(\"weigth:\"+str(cap.get(3))) print(\"height:\"+str(cap.get(4))) cap.set(3, 800) cap.set(4,", "cap = cv2.VideoCapture(0, cv2.CAP_DSHOW) print(\"weigth:\"+str(cap.get(3))) print(\"height:\"+str(cap.get(4))) cap.set(3, 800) cap.set(4, 720) print(\"weigth*:\"+str(cap.get(3))) print(\"height*:\"+str(cap.get(4))) while", "cv2.CAP_DSHOW) print(\"weigth:\"+str(cap.get(3))) print(\"height:\"+str(cap.get(4))) cap.set(3, 800) cap.set(4, 720) print(\"weigth*:\"+str(cap.get(3))) print(\"height*:\"+str(cap.get(4))) while True: ret, frame", "= cv2.VideoCapture(0, cv2.CAP_DSHOW) print(\"weigth:\"+str(cap.get(3))) print(\"height:\"+str(cap.get(4))) cap.set(3, 800) cap.set(4, 720) print(\"weigth*:\"+str(cap.get(3))) print(\"height*:\"+str(cap.get(4))) while True:", "print(\"height:\"+str(cap.get(4))) cap.set(3, 800) cap.set(4, 720) print(\"weigth*:\"+str(cap.get(3))) print(\"height*:\"+str(cap.get(4))) while True: ret, frame = 
cap.read()" ]
[ "128], [1, 37, 17, 33, 8, 20, 86, 29, 134, 146, 48, 126,", "utils.plot import convert_plt_to_numpy, convert_plt_to_tf, plot_tours from utils import try_import_tensorflow tf = try_import_tensorflow() if", "plot_tours from utils import try_import_tensorflow tf = try_import_tensorflow() if __name__ == \"__main__\": config", "88, 99, 32, 7, 80, 61, 112, 2, 108, 6, 42, 94, 117,", "17, 33, 8, 20, 86, 29, 134, 146, 48, 126, 34, 105, 133,", "113, 27, 76, 114, 11, 135, 93, 109, 118, 102, 21, 53, 41,", "[1, 50, 58, 96, 140, 147, 142, 132, 70, 40, 30, 43, 95,", "from env.evrp import EVRPEnv from utils.config import read_config import numpy as np from", "25, 67, 100, 74, 89, 72, 10, 131], [1, 14, 110, 144, 51,", "46, 62, 65, 45, 12, 101, 75, 82, 63, 97, 146, 19, 91,", "try_import_tensorflow tf = try_import_tensorflow() if __name__ == \"__main__\": config = read_config(\"config.json\") env =", "13, 90, 66, 46, 62, 65, 45, 12, 101, 75, 82, 63, 97,", "import numpy as np from env.worker import VectorizedEVRP from utils.plot import convert_plt_to_numpy, convert_plt_to_tf,", "49, 59, 47, 31, 122, 145, 88, 99, 32, 7, 80, 61, 112,", "from utils.plot import convert_plt_to_numpy, convert_plt_to_tf, plot_tours from utils import try_import_tensorflow tf = try_import_tensorflow()", "62, 65, 45, 12, 101, 75, 82, 63, 97, 146, 19, 91, 26,", "20, 86, 29, 134, 146, 48, 126, 34, 105, 133, 24, 22, 124,", "69, 85, 129, 119, 55, 13, 90, 66, 46, 62, 65, 45, 12,", "130, 106, 139, 35, 68, 136, 60]] plt = plot_tours(env, tours, 123) plt.show()", "[1, 37, 17, 33, 8, 20, 86, 29, 134, 146, 48, 126, 34,", "80, 61, 112, 2, 108, 6, 42, 94, 117, 137], [1, 79, 87,", "110, 144, 51, 52, 39, 104, 64, 113, 27, 76, 114, 11, 135,", "from utils.config import read_config import numpy as np from env.worker import VectorizedEVRP from", "124, 84, 57, 92, 36, 98], [1, 116, 125, 49, 59, 47, 31,", "[[1, 69, 85, 129, 119, 55, 13, 90, 66, 46, 62, 65, 45,", "91, 26, 128], [1, 37, 17, 33, 8, 20, 86, 29, 134, 146,", "121, 143, 
56, 138, 83, 15, 38, 123, 78, 23, 120, 81, 141,", "26, 128], [1, 37, 17, 33, 8, 20, 86, 29, 134, 146, 48,", "15, 38, 123, 78, 23, 120, 81, 141, 147, 16, 28, 107, 25,", "134, 146, 48, 126, 34, 105, 133, 24, 22, 124, 84, 57, 92,", "34, 105, 133, 24, 22, 124, 84, 57, 92, 36, 98], [1, 116,", "12, 101, 75, 82, 63, 97, 146, 19, 91, 26, 128], [1, 37,", "123, 78, 23, 120, 81, 141, 147, 16, 28, 107, 25, 67, 100,", "import read_config import numpy as np from env.worker import VectorizedEVRP from utils.plot import", "= read_config(\"config.json\") env = EVRPEnv(config.env_config) tours = [[1, 69, 85, 129, 119, 55,", "126, 34, 105, 133, 24, 22, 124, 84, 57, 92, 36, 98], [1,", "39, 104, 64, 113, 27, 76, 114, 11, 135, 93, 109, 118, 102,", "116, 125, 49, 59, 47, 31, 122, 145, 88, 99, 32, 7, 80,", "10, 131], [1, 14, 110, 144, 51, 52, 39, 104, 64, 113, 27,", "utils import try_import_tensorflow tf = try_import_tensorflow() if __name__ == \"__main__\": config = read_config(\"config.json\")", "146, 48, 126, 34, 105, 133, 24, 22, 124, 84, 57, 92, 36,", "EVRPEnv(config.env_config) tours = [[1, 69, 85, 129, 119, 55, 13, 90, 66, 46,", "122, 145, 88, 99, 32, 7, 80, 61, 112, 2, 108, 6, 42,", "3, 18, 127], [1, 121, 143, 56, 138, 83, 15, 38, 123, 78,", "28, 107, 25, 67, 100, 74, 89, 72, 10, 131], [1, 14, 110,", "[1, 116, 125, 49, 59, 47, 31, 122, 145, 88, 99, 32, 7,", "40, 30, 43, 95, 4, 77, 130, 106, 139, 35, 68, 136, 60]]", "77, 130, 106, 139, 35, 68, 136, 60]] plt = plot_tours(env, tours, 123)", "read_config import numpy as np from env.worker import VectorizedEVRP from utils.plot import convert_plt_to_numpy,", "config = read_config(\"config.json\") env = EVRPEnv(config.env_config) tours = [[1, 69, 85, 129, 119,", "98], [1, 116, 125, 49, 59, 47, 31, 122, 145, 88, 99, 32,", "27, 76, 114, 11, 135, 93, 109, 118, 102, 21, 53, 41, 71,", "23, 120, 81, 141, 147, 16, 28, 107, 25, 67, 100, 74, 89,", "\"__main__\": config = read_config(\"config.json\") env = EVRPEnv(config.env_config) tours = [[1, 
69, 85, 129,", "8, 20, 86, 29, 134, 146, 48, 126, 34, 105, 133, 24, 22,", "55, 13, 90, 66, 46, 62, 65, 45, 12, 101, 75, 82, 63,", "74, 89, 72, 10, 131], [1, 14, 110, 144, 51, 52, 39, 104,", "81, 141, 147, 16, 28, 107, 25, 67, 100, 74, 89, 72, 10,", "47, 31, 122, 145, 88, 99, 32, 7, 80, 61, 112, 2, 108,", "2, 108, 6, 42, 94, 117, 137], [1, 79, 87, 9, 73, 103,", "93, 109, 118, 102, 21, 53, 41, 71, 5], [1, 50, 58, 96,", "115, 44, 3, 18, 127], [1, 121, 143, 56, 138, 83, 15, 38,", "76, 114, 11, 135, 93, 109, 118, 102, 21, 53, 41, 71, 5],", "58, 96, 140, 147, 142, 132, 70, 40, 30, 43, 95, 4, 77,", "92, 36, 98], [1, 116, 125, 49, 59, 47, 31, 122, 145, 88,", "38, 123, 78, 23, 120, 81, 141, 147, 16, 28, 107, 25, 67,", "32, 7, 80, 61, 112, 2, 108, 6, 42, 94, 117, 137], [1,", "18, 127], [1, 121, 143, 56, 138, 83, 15, 38, 123, 78, 23,", "19, 91, 26, 128], [1, 37, 17, 33, 8, 20, 86, 29, 134,", "utils.config import read_config import numpy as np from env.worker import VectorizedEVRP from utils.plot", "102, 21, 53, 41, 71, 5], [1, 50, 58, 96, 140, 147, 142,", "[1, 79, 87, 9, 73, 103, 54, 111, 115, 44, 3, 18, 127],", "import try_import_tensorflow tf = try_import_tensorflow() if __name__ == \"__main__\": config = read_config(\"config.json\") env", "convert_plt_to_numpy, convert_plt_to_tf, plot_tours from utils import try_import_tensorflow tf = try_import_tensorflow() if __name__ ==", "94, 117, 137], [1, 79, 87, 9, 73, 103, 54, 111, 115, 44,", "138, 83, 15, 38, 123, 78, 23, 120, 81, 141, 147, 16, 28,", "43, 95, 4, 77, 130, 106, 139, 35, 68, 136, 60]] plt =", "33, 8, 20, 86, 29, 134, 146, 48, 126, 34, 105, 133, 24,", "114, 11, 135, 93, 109, 118, 102, 21, 53, 41, 71, 5], [1,", "env.worker import VectorizedEVRP from utils.plot import convert_plt_to_numpy, convert_plt_to_tf, plot_tours from utils import try_import_tensorflow", "36, 98], [1, 116, 125, 49, 59, 47, 31, 122, 145, 88, 99,", "29, 134, 146, 48, 126, 34, 105, 133, 24, 22, 124, 84, 57,", "import VectorizedEVRP from utils.plot 
import convert_plt_to_numpy, convert_plt_to_tf, plot_tours from utils import try_import_tensorflow tf", "105, 133, 24, 22, 124, 84, 57, 92, 36, 98], [1, 116, 125,", "127], [1, 121, 143, 56, 138, 83, 15, 38, 123, 78, 23, 120,", "63, 97, 146, 19, 91, 26, 128], [1, 37, 17, 33, 8, 20,", "117, 137], [1, 79, 87, 9, 73, 103, 54, 111, 115, 44, 3,", "65, 45, 12, 101, 75, 82, 63, 97, 146, 19, 91, 26, 128],", "61, 112, 2, 108, 6, 42, 94, 117, 137], [1, 79, 87, 9,", "np from env.worker import VectorizedEVRP from utils.plot import convert_plt_to_numpy, convert_plt_to_tf, plot_tours from utils", "__name__ == \"__main__\": config = read_config(\"config.json\") env = EVRPEnv(config.env_config) tours = [[1, 69,", "EVRPEnv from utils.config import read_config import numpy as np from env.worker import VectorizedEVRP", "env = EVRPEnv(config.env_config) tours = [[1, 69, 85, 129, 119, 55, 13, 90,", "71, 5], [1, 50, 58, 96, 140, 147, 142, 132, 70, 40, 30,", "7, 80, 61, 112, 2, 108, 6, 42, 94, 117, 137], [1, 79,", "53, 41, 71, 5], [1, 50, 58, 96, 140, 147, 142, 132, 70,", "109, 118, 102, 21, 53, 41, 71, 5], [1, 50, 58, 96, 140,", "107, 25, 67, 100, 74, 89, 72, 10, 131], [1, 14, 110, 144,", "as np from env.worker import VectorizedEVRP from utils.plot import convert_plt_to_numpy, convert_plt_to_tf, plot_tours from", "convert_plt_to_tf, plot_tours from utils import try_import_tensorflow tf = try_import_tensorflow() if __name__ == \"__main__\":", "108, 6, 42, 94, 117, 137], [1, 79, 87, 9, 73, 103, 54,", "== \"__main__\": config = read_config(\"config.json\") env = EVRPEnv(config.env_config) tours = [[1, 69, 85,", "86, 29, 134, 146, 48, 126, 34, 105, 133, 24, 22, 124, 84,", "141, 147, 16, 28, 107, 25, 67, 100, 74, 89, 72, 10, 131],", "50, 58, 96, 140, 147, 142, 132, 70, 40, 30, 43, 95, 4,", "57, 92, 36, 98], [1, 116, 125, 49, 59, 47, 31, 122, 145,", "75, 82, 63, 97, 146, 19, 91, 26, 128], [1, 37, 17, 33,", "147, 16, 28, 107, 25, 67, 100, 74, 89, 72, 10, 131], [1,", "tours = [[1, 69, 85, 129, 
119, 55, 13, 90, 66, 46, 62,", "78, 23, 120, 81, 141, 147, 16, 28, 107, 25, 67, 100, 74,", "11, 135, 93, 109, 118, 102, 21, 53, 41, 71, 5], [1, 50,", "try_import_tensorflow() if __name__ == \"__main__\": config = read_config(\"config.json\") env = EVRPEnv(config.env_config) tours =", "from utils import try_import_tensorflow tf = try_import_tensorflow() if __name__ == \"__main__\": config =", "112, 2, 108, 6, 42, 94, 117, 137], [1, 79, 87, 9, 73,", "64, 113, 27, 76, 114, 11, 135, 93, 109, 118, 102, 21, 53,", "85, 129, 119, 55, 13, 90, 66, 46, 62, 65, 45, 12, 101,", "142, 132, 70, 40, 30, 43, 95, 4, 77, 130, 106, 139, 35,", "83, 15, 38, 123, 78, 23, 120, 81, 141, 147, 16, 28, 107,", "99, 32, 7, 80, 61, 112, 2, 108, 6, 42, 94, 117, 137],", "133, 24, 22, 124, 84, 57, 92, 36, 98], [1, 116, 125, 49,", "31, 122, 145, 88, 99, 32, 7, 80, 61, 112, 2, 108, 6,", "import EVRPEnv from utils.config import read_config import numpy as np from env.worker import", "145, 88, 99, 32, 7, 80, 61, 112, 2, 108, 6, 42, 94,", "[1, 121, 143, 56, 138, 83, 15, 38, 123, 78, 23, 120, 81,", "125, 49, 59, 47, 31, 122, 145, 88, 99, 32, 7, 80, 61,", "5], [1, 50, 58, 96, 140, 147, 142, 132, 70, 40, 30, 43,", "56, 138, 83, 15, 38, 123, 78, 23, 120, 81, 141, 147, 16,", "104, 64, 113, 27, 76, 114, 11, 135, 93, 109, 118, 102, 21,", "59, 47, 31, 122, 145, 88, 99, 32, 7, 80, 61, 112, 2,", "21, 53, 41, 71, 5], [1, 50, 58, 96, 140, 147, 142, 132,", "37, 17, 33, 8, 20, 86, 29, 134, 146, 48, 126, 34, 105,", "132, 70, 40, 30, 43, 95, 4, 77, 130, 106, 139, 35, 68,", "9, 73, 103, 54, 111, 115, 44, 3, 18, 127], [1, 121, 143,", "24, 22, 124, 84, 57, 92, 36, 98], [1, 116, 125, 49, 59,", "87, 9, 73, 103, 54, 111, 115, 44, 3, 18, 127], [1, 121,", "51, 52, 39, 104, 64, 113, 27, 76, 114, 11, 135, 93, 109,", "env.evrp import EVRPEnv from utils.config import read_config import numpy as np from env.worker", "143, 56, 138, 83, 15, 38, 123, 78, 23, 120, 81, 141, 147,", "44, 3, 18, 127], [1, 121, 143, 56, 138, 83, 15, 38, 
123,", "79, 87, 9, 73, 103, 54, 111, 115, 44, 3, 18, 127], [1,", "131], [1, 14, 110, 144, 51, 52, 39, 104, 64, 113, 27, 76,", "135, 93, 109, 118, 102, 21, 53, 41, 71, 5], [1, 50, 58,", "45, 12, 101, 75, 82, 63, 97, 146, 19, 91, 26, 128], [1,", "70, 40, 30, 43, 95, 4, 77, 130, 106, 139, 35, 68, 136,", "96, 140, 147, 142, 132, 70, 40, 30, 43, 95, 4, 77, 130,", "numpy as np from env.worker import VectorizedEVRP from utils.plot import convert_plt_to_numpy, convert_plt_to_tf, plot_tours", "tf = try_import_tensorflow() if __name__ == \"__main__\": config = read_config(\"config.json\") env = EVRPEnv(config.env_config)", "129, 119, 55, 13, 90, 66, 46, 62, 65, 45, 12, 101, 75,", "89, 72, 10, 131], [1, 14, 110, 144, 51, 52, 39, 104, 64,", "67, 100, 74, 89, 72, 10, 131], [1, 14, 110, 144, 51, 52,", "import convert_plt_to_numpy, convert_plt_to_tf, plot_tours from utils import try_import_tensorflow tf = try_import_tensorflow() if __name__", "84, 57, 92, 36, 98], [1, 116, 125, 49, 59, 47, 31, 122,", "30, 43, 95, 4, 77, 130, 106, 139, 35, 68, 136, 60]] plt", "6, 42, 94, 117, 137], [1, 79, 87, 9, 73, 103, 54, 111,", "95, 4, 77, 130, 106, 139, 35, 68, 136, 60]] plt = plot_tours(env,", "16, 28, 107, 25, 67, 100, 74, 89, 72, 10, 131], [1, 14,", "4, 77, 130, 106, 139, 35, 68, 136, 60]] plt = plot_tours(env, tours,", "137], [1, 79, 87, 9, 73, 103, 54, 111, 115, 44, 3, 18,", "103, 54, 111, 115, 44, 3, 18, 127], [1, 121, 143, 56, 138,", "[1, 14, 110, 144, 51, 52, 39, 104, 64, 113, 27, 76, 114,", "82, 63, 97, 146, 19, 91, 26, 128], [1, 37, 17, 33, 8,", "= [[1, 69, 85, 129, 119, 55, 13, 90, 66, 46, 62, 65,", "= EVRPEnv(config.env_config) tours = [[1, 69, 85, 129, 119, 55, 13, 90, 66,", "120, 81, 141, 147, 16, 28, 107, 25, 67, 100, 74, 89, 72,", "41, 71, 5], [1, 50, 58, 96, 140, 147, 142, 132, 70, 40,", "54, 111, 115, 44, 3, 18, 127], [1, 121, 143, 56, 138, 83,", "14, 110, 144, 51, 52, 39, 104, 64, 113, 27, 76, 114, 11,", "42, 94, 117, 137], [1, 79, 87, 9, 73, 103, 54, 111, 115,", "100, 74, 
89, 72, 10, 131], [1, 14, 110, 144, 51, 52, 39,", "118, 102, 21, 53, 41, 71, 5], [1, 50, 58, 96, 140, 147,", "119, 55, 13, 90, 66, 46, 62, 65, 45, 12, 101, 75, 82,", "101, 75, 82, 63, 97, 146, 19, 91, 26, 128], [1, 37, 17,", "from env.worker import VectorizedEVRP from utils.plot import convert_plt_to_numpy, convert_plt_to_tf, plot_tours from utils import", "if __name__ == \"__main__\": config = read_config(\"config.json\") env = EVRPEnv(config.env_config) tours = [[1,", "22, 124, 84, 57, 92, 36, 98], [1, 116, 125, 49, 59, 47,", "144, 51, 52, 39, 104, 64, 113, 27, 76, 114, 11, 135, 93,", "48, 126, 34, 105, 133, 24, 22, 124, 84, 57, 92, 36, 98],", "147, 142, 132, 70, 40, 30, 43, 95, 4, 77, 130, 106, 139,", "= try_import_tensorflow() if __name__ == \"__main__\": config = read_config(\"config.json\") env = EVRPEnv(config.env_config) tours", "146, 19, 91, 26, 128], [1, 37, 17, 33, 8, 20, 86, 29,", "73, 103, 54, 111, 115, 44, 3, 18, 127], [1, 121, 143, 56,", "VectorizedEVRP from utils.plot import convert_plt_to_numpy, convert_plt_to_tf, plot_tours from utils import try_import_tensorflow tf =", "read_config(\"config.json\") env = EVRPEnv(config.env_config) tours = [[1, 69, 85, 129, 119, 55, 13,", "52, 39, 104, 64, 113, 27, 76, 114, 11, 135, 93, 109, 118,", "66, 46, 62, 65, 45, 12, 101, 75, 82, 63, 97, 146, 19,", "72, 10, 131], [1, 14, 110, 144, 51, 52, 39, 104, 64, 113,", "140, 147, 142, 132, 70, 40, 30, 43, 95, 4, 77, 130, 106,", "97, 146, 19, 91, 26, 128], [1, 37, 17, 33, 8, 20, 86,", "90, 66, 46, 62, 65, 45, 12, 101, 75, 82, 63, 97, 146,", "111, 115, 44, 3, 18, 127], [1, 121, 143, 56, 138, 83, 15," ]
[ "j == k * k: print i, j, k print i * j", "1000 - i - j if i and j and k and i", "j * j == k * k: print i, j, k print i", "= 1000 - i - j if i and j and k and", "i): k = 1000 - i - j if i and j and", "xrange(1000): for j in xrange(1000 - i): k = 1000 - i -", "k * k: print i, j, k print i * j * k", "k = 1000 - i - j if i and j and k", "+ j * j == k * k: print i, j, k print", "for j in xrange(1000 - i): k = 1000 - i - j", "i in xrange(1000): for j in xrange(1000 - i): k = 1000 -", "* i + j * j == k * k: print i, j,", "== k * k: print i, j, k print i * j *", "and j and k and i * i + j * j ==", "i - j if i and j and k and i * i", "in xrange(1000): for j in xrange(1000 - i): k = 1000 - i", "j if i and j and k and i * i + j", "i + j * j == k * k: print i, j, k", "and k and i * i + j * j == k *", "for i in xrange(1000): for j in xrange(1000 - i): k = 1000", "- i - j if i and j and k and i *", "and i * i + j * j == k * k: print", "- j if i and j and k and i * i +", "in xrange(1000 - i): k = 1000 - i - j if i", "* j == k * k: print i, j, k print i *", "i * i + j * j == k * k: print i,", "if i and j and k and i * i + j *", "j and k and i * i + j * j == k", "- i): k = 1000 - i - j if i and j", "j in xrange(1000 - i): k = 1000 - i - j if", "i and j and k and i * i + j * j", "xrange(1000 - i): k = 1000 - i - j if i and", "k and i * i + j * j == k * k:" ]
[ "of the TagKey that can only be set on creation. \" \"This data", "be in the form {org_id}/{tag_key_short_name}/{short_name} \" \"where short_name must be 1-63 characters, beginning", "the parser. Args: parser: ArgumentInterceptor, An argparse parser. required: Boolean, to enforce --parent", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "argument for the TagKey or TagValue's parent to the parser. Args: parser: ArgumentInterceptor,", "any JSON or YAML file conforming to the structure of \" \"a [Policy](https://cloud.google.com/iam/reference/rest/v1/Policy).\"))", "\"\"\" parser.add_argument( \"--purpose-data\", type=arg_parsers.ArgDict( spec={\"network\": str}, max_length=1, ), help=(\"Purpose data of the TagKey", "name. The resource name should \" \"be in the form {resource_type}/{numeric_id}. The namespaced", "\"must be 1-63 characters, beginning and ending with an \" \"alphanumeric character ([a-z0-9A-Z])", "\" \"file, as is any JSON or YAML file conforming to the structure", "between.\")) def AddLocationArgToParser(parser, message): \"\"\"Adds argument for the location. Args: parser: ArgumentInterceptor, An", "TagValue. \" \"Must not exceed 256 characters.\")) def AddPurposeArgToParser(parser): \"\"\"Adds argument for the", "\" must be 1-63 characters, beginning and ending with an \" \"alphanumeric character", "\"Path to a local JSON or YAML formatted file containing a valid \"", "this file except in compliance with the License. # You may obtain a", "validation from the policy \" \"system that corresponds to the purpose.\")) def AddPurposeDataArgToParser(parser):", "LLC. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0", "of the `get-iam-policy` command is a valid \" \"file, as is any JSON", "division from __future__ import unicode_literals from googlecloudsdk.calliope import arg_parsers from googlecloudsdk.calliope import base", "Boolean, to enforce --parent as a required flag. message: String, replacement help text", "conforming to the structure of \" \"a [Policy](https://cloud.google.com/iam/reference/rest/v1/Policy).\")) def AddTagValueArgToParser(parser): \"\"\"Adds the TagValue", "the resource.\")) def AddDescriptionArgToParser(parser): \"\"\"Adds argument for the TagKey's or TagValue's description to", "purpose to the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--purpose\",", "be 1-63 characters, beginning and ending with an \" \"alphanumeric character ([a-z0-9A-Z]) with", "parser. Args: parser: ArgumentInterceptor, an argparse parser. \"\"\" parser.add_argument( \"short_name\", metavar=\"SHORT_NAME\", help=(\"User specified,", "ANY KIND, either express or implied. # See the License for the specific", "\"should be in the form {org_id}/{short_name} where short_name \" \"must be 1-63 characters,", "in the form {org_id}/{short_name} where short_name \" \"must be 1-63 characters, beginning and", "\"\"\" parser.add_argument( \"--purpose\", metavar=\"PURPOSE\", choices=[\"GCE_FIREWALL\"], help=(\"Purpose specifier of the TagKey that can only", "parser.\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals", "help=(\"Tag value name or namespaced name. The name should \" \"be in the", "help=(\"User-assigned description of the TagKey or TagValue. 
\" \"Must not exceed 256 characters.\"))", "([a-z0-9A-Z]) with dashes (-), \" \"underscores ( _ ), dots (.), and alphanumerics", "The field\" \" must be 1-63 characters, beginning and ending with an \"", "1-63 characters, beginning and ending \" \"with an alphanumeric character ([a-z0-9A-Z]) with dashes", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "def AddPurposeArgToParser(parser): \"\"\"Adds argument for the TagKey's purpose to the parser. Args: parser:", "alphanumerics between. \")) def AddParentArgToParser(parser, required=True, message=\"\"): \"\"\"Adds argument for the TagKey or", "the purpose.\")) def AddAsyncArgToParser(parser): \"\"\"Adds async flag to the parser. Args: parser: ArgumentInterceptor,", "(-), underscores \" \"( _ ), dots (.), and alphanumerics between.\")) def AddForceArgToParser(parser):", "argument for the namespaced name or resource name to the parser. Args: parser:", "AddLocationArgToParser(parser, message): \"\"\"Adds argument for the location. Args: parser: ArgumentInterceptor, An argparse parser.", "parser. Args: parser: ArgumentInterceptor, An argparse parser. required: Boolean, to enforce --parent as", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "\"\"\" parser.add_argument( \"short_name\", metavar=\"SHORT_NAME\", help=(\"User specified, friendly name of the TagKey or TagValue.", "An argparse parser. \"\"\" base.ASYNC_FLAG.AddToParser(parser) def AddResourceNameArgToParser(parser): \"\"\"Adds resource name argument for the", "An argparse parser. \"\"\" parser.add_argument( \"--tag-value\", metavar=\"TAG_VALUE\", required=True, help=(\"Tag value name or namespaced", "name or namespaced name. The resource name should \" \"be in the form", "parser. Args: parser: ArgumentInterceptor, An argparse parser. 
\"\"\" parser.add_argument( \"RESOURCE_NAME\", metavar=\"RESOURCE_NAME\", help=(\"Resource name", "from googlecloudsdk.calliope import base def AddShortNameArgToParser(parser): \"\"\"Adds positional argument to parser. Args: parser:", "OF ANY KIND, either express or implied. # See the License for the", "with an \" \"alphanumeric character ([a-z0-9A-Z]) with dashes (-), \" \"underscores ( _", "parser.add_argument( \"POLICY_FILE\", metavar=\"POLICY_FILE\", help=( \"Path to a local JSON or YAML formatted file", "is any JSON or YAML file conforming to the structure of \" \"a", "name should \" \"be in the form tagValues/{numeric_id}. The namespaced name \" \"should", "or TagValue's description to the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\"", "--parent as a required flag. message: String, replacement help text for flag. \"\"\"", "value name or namespaced name. The name should \" \"be in the form", "Args: parser: ArgumentInterceptor, An argparse parser. message: String, help text for flag. \"\"\"", "dashes (-), underscores \" \"( _ ), dots (.), and alphanumerics between.\")) def", "{org_id}/{tag_key_short_name}/{short_name} \" \"where short_name must be 1-63 characters, beginning and ending \" \"with", "resource.\")) def AddDescriptionArgToParser(parser): \"\"\"Adds argument for the TagKey's or TagValue's description to the", "parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"RESOURCE_NAME\", metavar=\"RESOURCE_NAME\", help=(\"Resource name or namespaced", "set on creation. \" \"This data is validated by the policy system that", "underscores \" \"( _ ), dots (.), and alphanumerics between.\")) def AddForceArgToParser(parser): \"\"\"Adds", "\" \"with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), \" \"underscores (_), dots", "with dashes (-), \" \"underscores ( _ ), dots (.), and alphanumerics between.", "ArgumentInterceptor, An argparse parser. 
\"\"\" parser.add_argument( \"RESOURCE_NAME\", metavar=\"RESOURCE_NAME\", help=(\"Resource name or namespaced name.", "flag. \"\"\" parser.add_argument( \"--parent\", metavar=\"PARENT\", required=required, help=message if message else (\"Parent of the", "for defining CRM Tag arguments on a parser.\"\"\" from __future__ import absolute_import from", "parser. \"\"\" parser.add_argument( \"--purpose-data\", type=arg_parsers.ArgDict( spec={\"network\": str}, max_length=1, ), help=(\"Purpose data of the", "argparse parser. \"\"\" parser.add_argument( \"short_name\", metavar=\"SHORT_NAME\", help=(\"User specified, friendly name of the TagKey", "([a-z0-9A-Z]) with dashes (-), underscores \" \"( _ ), dots (.), and alphanumerics", "parser. \"\"\" base.ASYNC_FLAG.AddToParser(parser) def AddResourceNameArgToParser(parser): \"\"\"Adds resource name argument for the namespaced name", "ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--tag-value\", metavar=\"TAG_VALUE\", required=True, help=(\"Tag value name or", "\"--parent\", metavar=\"PARENT\", required=required, help=message if message else (\"Parent of the resource.\")) def AddDescriptionArgToParser(parser):", "data is validated by the policy system that corresponds\" \" to the purpose.\"))", "All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the", "friendly name of the TagKey or TagValue. The field\" \" must be 1-63", "__future__ import division from __future__ import unicode_literals from googlecloudsdk.calliope import arg_parsers from googlecloudsdk.calliope", "AddDescriptionArgToParser(parser): \"\"\"Adds argument for the TagKey's or TagValue's description to the parser. Args:", "the License. 
\"\"\"Utilities for defining CRM Tag arguments on a parser.\"\"\" from __future__", "{org_id}/{short_name} where short_name \" \"must be 1-63 characters, beginning and ending with an", "\" \"should be in the form {org_id}/{short_name} where short_name \" \"must be 1-63", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "specific language governing permissions and # limitations under the License. \"\"\"Utilities for defining", "\"\"\"Adds resource name argument for the namespaced name or resource name to the", "message): \"\"\"Adds argument for the location. Args: parser: ArgumentInterceptor, An argparse parser. message:", "ArgumentInterceptor, An argparse parser. required: Boolean, to enforce --parent as a required flag.", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "\" \"Must not exceed 256 characters.\")) def AddPurposeArgToParser(parser): \"\"\"Adds argument for the TagKey's", "choices=[\"GCE_FIREWALL\"], help=(\"Purpose specifier of the TagKey that can only be set on creation.", "the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"RESOURCE_NAME\", metavar=\"RESOURCE_NAME\", help=(\"Resource", "of the TagKey that can only be set on creation. \" \"Specifying this", "the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--force\", action=\"store_true\", help=(\"Force", "import division from __future__ import unicode_literals from googlecloudsdk.calliope import arg_parsers from googlecloudsdk.calliope import", "or TagValue's parent to the parser. Args: parser: ArgumentInterceptor, An argparse parser. required:", "\"\"\" parser.add_argument( \"POLICY_FILE\", metavar=\"POLICY_FILE\", help=( \"Path to a local JSON or YAML formatted", "to the purpose.\")) def AddAsyncArgToParser(parser): \"\"\"Adds async flag to the parser. Args: parser:", "to set. 
Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"POLICY_FILE\", metavar=\"POLICY_FILE\", help=(", "valid \" \"policy. The output of the `get-iam-policy` command is a valid \"", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "import absolute_import from __future__ import division from __future__ import unicode_literals from googlecloudsdk.calliope import", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--purpose-data\", type=arg_parsers.ArgDict( spec={\"network\": str}, max_length=1,", "the form {resource_type}/{numeric_id}. The namespaced name \" \"should be in the form {org_id}/{short_name}", "parser. \"\"\" parser.add_argument( \"short_name\", metavar=\"SHORT_NAME\", help=(\"User specified, friendly name of the TagKey or", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "_ ), dots (.), and alphanumerics between. \")) def AddParentArgToParser(parser, required=True, message=\"\"): \"\"\"Adds", "The output of the `get-iam-policy` command is a valid \" \"file, as is", "), help=(\"Purpose data of the TagKey that can only be set on creation.", "Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--force\", action=\"store_true\", help=(\"Force argument to", "__future__ import unicode_literals from googlecloudsdk.calliope import arg_parsers from googlecloudsdk.calliope import base def AddShortNameArgToParser(parser):", "required by applicable law or agreed to in writing, software # distributed under", "the TagKey that can only be set on creation. \" \"This data is", "and alphanumerics between. \")) def AddParentArgToParser(parser, required=True, message=\"\"): \"\"\"Adds argument for the TagKey", "parser: ArgumentInterceptor, An argparse parser. 
\"\"\" parser.add_argument( \"--purpose-data\", type=arg_parsers.ArgDict( spec={\"network\": str}, max_length=1, ),", "applicable law or agreed to in writing, software # distributed under the License", "to the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"RESOURCE_NAME\", metavar=\"RESOURCE_NAME\",", "parser: ArgumentInterceptor, An argparse parser. message: String, help text for flag. \"\"\" parser.add_argument(", "spec={\"network\": str}, max_length=1, ), help=(\"Purpose data of the TagKey that can only be", "\"\"\"Adds argument for the TagKey's purpose to the parser. Args: parser: ArgumentInterceptor, An", "the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--description\", metavar=\"DESCRIPTION\", help=(\"User-assigned", "the specific language governing permissions and # limitations under the License. \"\"\"Utilities for", "\"short_name\", metavar=\"SHORT_NAME\", help=(\"User specified, friendly name of the TagKey or TagValue. The field\"", "is validated by the policy system that corresponds\" \" to the purpose.\")) def", "or agreed to in writing, software # distributed under the License is distributed", "from googlecloudsdk.calliope import arg_parsers from googlecloudsdk.calliope import base def AddShortNameArgToParser(parser): \"\"\"Adds positional argument", "(-), \" \"underscores ( _ ), dots (.), and alphanumerics between. \")) def", "AddAsyncArgToParser(parser): \"\"\"Adds async flag to the parser. Args: parser: ArgumentInterceptor, An argparse parser.", "containing a valid \" \"policy. The output of the `get-iam-policy` command is a", "the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--purpose\", metavar=\"PURPOSE\", choices=[\"GCE_FIREWALL\"],", "alphanumerics between.\")) def AddForceArgToParser(parser): \"\"\"Adds force argument to the parser. 
Args: parser: ArgumentInterceptor,", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "parser. message: String, help text for flag. \"\"\" parser.add_argument( \"--location\", metavar=\"LOCATION\", required=False, help=message)", "between. \")) def AddParentArgToParser(parser, required=True, message=\"\"): \"\"\"Adds argument for the TagKey or TagValue's", "namespaced name or resource name to the parser. Args: parser: ArgumentInterceptor, An argparse", "namespaced name. The resource name should \" \"be in the form {resource_type}/{numeric_id}. The", "on creation. \" \"This data is validated by the policy system that corresponds\"", "flag to the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" base.ASYNC_FLAG.AddToParser(parser) def", "beginning and ending with an \" \"alphanumeric character ([a-z0-9A-Z]) with dashes (-), \"", "or namespaced name. The resource name should \" \"be in the form {resource_type}/{numeric_id}.", "\"alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores \" \"( _ ), dots (.),", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "a local JSON or YAML formatted file containing a valid \" \"policy. The", "writing, software # distributed under the License is distributed on an \"AS IS\"", "import base def AddShortNameArgToParser(parser): \"\"\"Adds positional argument to parser. Args: parser: ArgumentInterceptor, an", "\" \"be in the form tagValues/{numeric_id}. The namespaced name \" \"should be in", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "field adds additional validation from the policy \" \"system that corresponds to the", "TagValue's parent to the parser. Args: parser: ArgumentInterceptor, An argparse parser. required: Boolean,", "of the TagKey or TagValue. The field\" \" must be 1-63 characters, beginning", "-*- coding: utf-8 -*- # # Copyright 2019 Google LLC. All Rights Reserved.", "License. 
# You may obtain a copy of the License at # #", "parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" base.ASYNC_FLAG.AddToParser(parser) def AddResourceNameArgToParser(parser): \"\"\"Adds resource", "required=True, message=\"\"): \"\"\"Adds argument for the TagKey or TagValue's parent to the parser.", "Args: parser: ArgumentInterceptor, An argparse parser. required: Boolean, to enforce --parent as a", "metavar=\"RESOURCE_NAME\", help=(\"Resource name or namespaced name. The resource name should \" \"be in", "positional argument to parser. Args: parser: ArgumentInterceptor, an argparse parser. \"\"\" parser.add_argument( \"short_name\",", "beginning and ending \" \"with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), \"", "ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--force\", action=\"store_true\", help=(\"Force argument to bypass checks.\"))", "\" \"alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores \" \"( _ ), dots", "compliance with the License. # You may obtain a copy of the License", "googlecloudsdk.calliope import base def AddShortNameArgToParser(parser): \"\"\"Adds positional argument to parser. Args: parser: ArgumentInterceptor,", "can only be set on creation. \" \"Specifying this field adds additional validation", "async flag to the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" base.ASYNC_FLAG.AddToParser(parser)", "arg_parsers from googlecloudsdk.calliope import base def AddShortNameArgToParser(parser): \"\"\"Adds positional argument to parser. Args:", "(.), and alphanumerics between.\")) def AddLocationArgToParser(parser, message): \"\"\"Adds argument for the location. Args:", "of the TagKey or TagValue. \" \"Must not exceed 256 characters.\")) def AddPurposeArgToParser(parser):", "should \" \"be in the form {resource_type}/{numeric_id}. 
def AddShortNameArgToParser(parser):
  """Registers the positional short_name argument on the given parser.

  Args:
    parser: ArgumentInterceptor, an argparse parser.
  """
  # Help text kept verbatim; the character rules mirror the CRM Tag API's
  # short-name validation.
  short_name_help = (
      "User specified, friendly name of the TagKey or TagValue. The field"
      " must be 1-63 characters, beginning and ending with an "
      "alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores "
      "( _ ), dots (.), and alphanumerics between. ")
  parser.add_argument("short_name", metavar="SHORT_NAME", help=short_name_help)
\"\"\" parser.add_argument( \"--purpose\", metavar=\"PURPOSE\", choices=[\"GCE_FIREWALL\"], help=(\"Purpose specifier", "\" \"underscores ( _ ), dots (.), and alphanumerics between. \")) def AddParentArgToParser(parser,", "form {org_id}/{tag_key_short_name}/{short_name} \" \"where short_name must be 1-63 characters, beginning and ending \"", "argument for the TagKey's or TagValue's description to the parser. Args: parser: ArgumentInterceptor,", "agreed to in writing, software # distributed under the License is distributed on", "parser.add_argument( \"short_name\", metavar=\"SHORT_NAME\", help=(\"User specified, friendly name of the TagKey or TagValue. The", "force argument to the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument(", "parser. \"\"\" parser.add_argument( \"--purpose\", metavar=\"PURPOSE\", choices=[\"GCE_FIREWALL\"], help=(\"Purpose specifier of the TagKey that can", "of the resource.\")) def AddDescriptionArgToParser(parser): \"\"\"Adds argument for the TagKey's or TagValue's description", "an argparse parser. \"\"\" parser.add_argument( \"short_name\", metavar=\"SHORT_NAME\", help=(\"User specified, friendly name of the", "\"\"\" parser.add_argument( \"--force\", action=\"store_true\", help=(\"Force argument to bypass checks.\")) def AddPolicyFileArgToParser(parser): \"\"\"Adds argument", "_ ), dots (.), and alphanumerics between.\")) def AddForceArgToParser(parser): \"\"\"Adds force argument to", "TagValue's description to the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument(", "(the \"License\"); # you may not use this file except in compliance with", "for the namespaced name or resource name to the parser. Args: parser: ArgumentInterceptor,", "# limitations under the License. \"\"\"Utilities for defining CRM Tag arguments on a", "for the TagKey or TagValue's parent to the parser. Args: parser: ArgumentInterceptor, An", "help=(\"Resource name or namespaced name. 
def AddParentArgToParser(parser, required=True, message=""):
  """Registers the --parent flag for a TagKey or TagValue on the parser.

  Args:
    parser: ArgumentInterceptor, An argparse parser.
    required: Boolean, to enforce --parent as a required flag.
    message: String, replacement help text for flag.
  """
  # An empty message falls back to the generic default help string.
  parser.add_argument(
      "--parent",
      metavar="PARENT",
      required=required,
      help=message or ("Parent of the resource."))
def AddDescriptionArgToParser(parser):
  """Registers the optional --description flag on the given parser.

  Args:
    parser: ArgumentInterceptor, An argparse parser.
  """
  description_help = (
      "User-assigned description of the TagKey or TagValue. "
      "Must not exceed 256 characters.")
  parser.add_argument(
      "--description",
      metavar="DESCRIPTION",
      help=description_help)
\"\"\" parser.add_argument( \"--purpose\", metavar=\"PURPOSE\", choices=[\"GCE_FIREWALL\"], help=(\"Purpose specifier of the", "coding: utf-8 -*- # # Copyright 2019 Google LLC. All Rights Reserved. #", "that corresponds\" \" to the purpose.\")) def AddAsyncArgToParser(parser): \"\"\"Adds async flag to the", "\" \"This data is validated by the policy system that corresponds\" \" to", "(.), and alphanumerics between.\")) def AddForceArgToParser(parser): \"\"\"Adds force argument to the parser. Args:", "checks.\")) def AddPolicyFileArgToParser(parser): \"\"\"Adds argument for the local Policy file to set. Args:", "parser. \"\"\" parser.add_argument( \"POLICY_FILE\", metavar=\"POLICY_FILE\", help=( \"Path to a local JSON or YAML", "def AddPolicyFileArgToParser(parser): \"\"\"Adds argument for the local Policy file to set. Args: parser:", "or implied. # See the License for the specific language governing permissions and", "to the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--purpose\", metavar=\"PURPOSE\",", "form {org_id}/{short_name} where short_name \" \"must be 1-63 characters, beginning and ending with", "resource name should \" \"be in the form {resource_type}/{numeric_id}. The namespaced name \"", "def AddForceArgToParser(parser): \"\"\"Adds force argument to the parser. Args: parser: ArgumentInterceptor, An argparse", "def AddDescriptionArgToParser(parser): \"\"\"Adds argument for the TagKey's or TagValue's description to the parser.", "YAML formatted file containing a valid \" \"policy. The output of the `get-iam-policy`", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "\" \"policy. The output of the `get-iam-policy` command is a valid \" \"file,", "for the location. Args: parser: ArgumentInterceptor, An argparse parser. message: String, help text", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "the TagValue argument to the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\"", "__future__ import absolute_import from __future__ import division from __future__ import unicode_literals from googlecloudsdk.calliope", "argument to the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--tag-value\",", "( _ ), dots (.), and alphanumerics between. \")) def AddParentArgToParser(parser, required=True, message=\"\"):", "in the form {resource_type}/{numeric_id}. The namespaced name \" \"should be in the form", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "[Policy](https://cloud.google.com/iam/reference/rest/v1/Policy).\")) def AddTagValueArgToParser(parser): \"\"\"Adds the TagValue argument to the parser. Args: parser: ArgumentInterceptor,", "in writing, software # distributed under the License is distributed on an \"AS", "ArgumentInterceptor, An argparse parser. message: String, help text for flag. \"\"\" parser.add_argument( \"--location\",", "character ([a-z0-9A-Z]) with dashes (-), \" \"underscores ( _ ), dots (.), and", "characters, beginning and ending \" \"with an alphanumeric character ([a-z0-9A-Z]) with dashes (-),", "JSON or YAML file conforming to the structure of \" \"a [Policy](https://cloud.google.com/iam/reference/rest/v1/Policy).\")) def", "argparse parser. \"\"\" parser.add_argument( \"--description\", metavar=\"DESCRIPTION\", help=(\"User-assigned description of the TagKey or TagValue.", "Policy file to set. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"POLICY_FILE\",", "namespaced name. The name should \" \"be in the form tagValues/{numeric_id}. 
The namespaced", "\"system that corresponds to the purpose.\")) def AddPurposeDataArgToParser(parser): \"\"\"Adds argument for the TagKey's", "parser: ArgumentInterceptor, An argparse parser. \"\"\" base.ASYNC_FLAG.AddToParser(parser) def AddResourceNameArgToParser(parser): \"\"\"Adds resource name argument", "governing permissions and # limitations under the License. \"\"\"Utilities for defining CRM Tag", "parser.add_argument( \"--parent\", metavar=\"PARENT\", required=required, help=message if message else (\"Parent of the resource.\")) def", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "tagValues/{numeric_id}. The namespaced name \" \"should be in the form {org_id}/{tag_key_short_name}/{short_name} \" \"where", "and ending with an \" \"alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores \"", "name of the TagKey or TagValue. The field\" \" must be 1-63 characters,", "the TagKey or TagValue's parent to the parser. Args: parser: ArgumentInterceptor, An argparse", "ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--description\", metavar=\"DESCRIPTION\", help=(\"User-assigned description of the TagKey", "of \" \"a [Policy](https://cloud.google.com/iam/reference/rest/v1/Policy).\")) def AddTagValueArgToParser(parser): \"\"\"Adds the TagValue argument to the parser.", "only be set on creation. \" \"This data is validated by the policy", "TagKey that can only be set on creation. \" \"This data is validated", "TagValue. The field\" \" must be 1-63 characters, beginning and ending with an", "purpose data to the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument(", "the parser. Args: parser: ArgumentInterceptor, An argparse parser. 
\"\"\" base.ASYNC_FLAG.AddToParser(parser) def AddResourceNameArgToParser(parser): \"\"\"Adds", "help=(\"Purpose specifier of the TagKey that can only be set on creation. \"", "help=(\"User specified, friendly name of the TagKey or TagValue. The field\" \" must", "parser. \"\"\" parser.add_argument( \"--tag-value\", metavar=\"TAG_VALUE\", required=True, help=(\"Tag value name or namespaced name. The", "for the TagKey's or TagValue's description to the parser. Args: parser: ArgumentInterceptor, An", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "to parser. Args: parser: ArgumentInterceptor, an argparse parser. \"\"\" parser.add_argument( \"short_name\", metavar=\"SHORT_NAME\", help=(\"User", "metavar=\"PARENT\", required=required, help=message if message else (\"Parent of the resource.\")) def AddDescriptionArgToParser(parser): \"\"\"Adds", "metavar=\"DESCRIPTION\", help=(\"User-assigned description of the TagKey or TagValue. \" \"Must not exceed 256", "parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--purpose-data\", type=arg_parsers.ArgDict( spec={\"network\": str},", "\"--description\", metavar=\"DESCRIPTION\", help=(\"User-assigned description of the TagKey or TagValue. \" \"Must not exceed", "to a local JSON or YAML formatted file containing a valid \" \"policy.", "-*- # # Copyright 2019 Google LLC. All Rights Reserved. # # Licensed", "parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--description\", metavar=\"DESCRIPTION\", help=(\"User-assigned description", "defining CRM Tag arguments on a parser.\"\"\" from __future__ import absolute_import from __future__", "def AddPurposeDataArgToParser(parser): \"\"\"Adds argument for the TagKey's purpose data to the parser. Args:", "and # limitations under the License. 
\"\"\"Utilities for defining CRM Tag arguments on", "\"be in the form {resource_type}/{numeric_id}. The namespaced name \" \"should be in the", "field\" \" must be 1-63 characters, beginning and ending with an \" \"alphanumeric", "# -*- coding: utf-8 -*- # # Copyright 2019 Google LLC. All Rights", "parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"POLICY_FILE\", metavar=\"POLICY_FILE\", help=( \"Path to a", "by the policy system that corresponds\" \" to the purpose.\")) def AddAsyncArgToParser(parser): \"\"\"Adds", "use this file except in compliance with the License. # You may obtain", "the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--tag-value\", metavar=\"TAG_VALUE\", required=True,", "TagKey or TagValue's parent to the parser. Args: parser: ArgumentInterceptor, An argparse parser.", "from __future__ import division from __future__ import unicode_literals from googlecloudsdk.calliope import arg_parsers from", "\"\"\"Adds argument for the TagKey's or TagValue's description to the parser. Args: parser:", "where short_name \" \"must be 1-63 characters, beginning and ending with an \"", "an alphanumeric character ([a-z0-9A-Z]) with dashes (-), \" \"underscores (_), dots (.), and", "An argparse parser. required: Boolean, to enforce --parent as a required flag. message:", "the local Policy file to set. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\"", "name. The name should \" \"be in the form tagValues/{numeric_id}. The namespaced name", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "\" \"alphanumeric character ([a-z0-9A-Z]) with dashes (-), \" \"underscores ( _ ), dots", "parent to the parser. Args: parser: ArgumentInterceptor, An argparse parser. required: Boolean, to", "this field adds additional validation from the policy \" \"system that corresponds to", "text for flag. 
\"\"\" parser.add_argument( \"--parent\", metavar=\"PARENT\", required=required, help=message if message else (\"Parent", "short_name must be 1-63 characters, beginning and ending \" \"with an alphanumeric character", "1-63 characters, beginning and ending with an \" \"alphanumeric character ([a-z0-9A-Z]) with dashes", "for the TagKey's purpose to the parser. Args: parser: ArgumentInterceptor, An argparse parser.", "file to set. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"POLICY_FILE\", metavar=\"POLICY_FILE\",", "name \" \"should be in the form {org_id}/{short_name} where short_name \" \"must be", "description to the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--description\",", "and alphanumerics between.\")) def AddForceArgToParser(parser): \"\"\"Adds force argument to the parser. Args: parser:", "2.0 (the \"License\"); # you may not use this file except in compliance", "dots (.), and alphanumerics between. \")) def AddParentArgToParser(parser, required=True, message=\"\"): \"\"\"Adds argument for", "\"Must not exceed 256 characters.\")) def AddPurposeArgToParser(parser): \"\"\"Adds argument for the TagKey's purpose", "metavar=\"POLICY_FILE\", help=( \"Path to a local JSON or YAML formatted file containing a", "\"a [Policy](https://cloud.google.com/iam/reference/rest/v1/Policy).\")) def AddTagValueArgToParser(parser): \"\"\"Adds the TagValue argument to the parser. Args: parser:", "the TagKey or TagValue. The field\" \" must be 1-63 characters, beginning and", "else (\"Parent of the resource.\")) def AddDescriptionArgToParser(parser): \"\"\"Adds argument for the TagKey's or", "for the local Policy file to set. 
def AddForceArgToParser(parser):
  """Registers the boolean --force flag on the given parser.

  Args:
    parser: ArgumentInterceptor, An argparse parser.
  """
  # store_true: defaults to False when the flag is absent.
  parser.add_argument(
      "--force",
      action="store_true",
      help=("Force argument to bypass checks."))
def AddPolicyFileArgToParser(parser):
  """Registers the positional POLICY_FILE argument on the given parser.

  Args:
    parser: ArgumentInterceptor, An argparse parser.
  """
  policy_file_help = (
      "Path to a local JSON or YAML formatted file containing a valid "
      "policy. The output of the `get-iam-policy` command is a valid "
      "file, as is any JSON or YAML file conforming to the structure of "
      "a [Policy](https://cloud.google.com/iam/reference/rest/v1/Policy).")
  parser.add_argument(
      "POLICY_FILE",
      metavar="POLICY_FILE",
      help=policy_file_help)
def AddTagValueArgToParser(parser):
  """Registers the required --tag-value flag on the given parser.

  Args:
    parser: ArgumentInterceptor, An argparse parser.
  """
  tag_value_help = (
      "Tag value name or namespaced name. The name should "
      "be in the form tagValues/{numeric_id}. The namespaced name "
      "should be in the form {org_id}/{tag_key_short_name}/{short_name} "
      "where short_name must be 1-63 characters, beginning and ending "
      "with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), "
      "underscores (_), dots (.), and alphanumerics between.")
  parser.add_argument(
      "--tag-value",
      metavar="TAG_VALUE",
      required=True,
      help=tag_value_help)
\"\"\"", "\"\"\" parser.add_argument( \"--tag-value\", metavar=\"TAG_VALUE\", required=True, help=(\"Tag value name or namespaced name. The name", "metavar=\"SHORT_NAME\", help=(\"User specified, friendly name of the TagKey or TagValue. The field\" \"", "\"alphanumeric character ([a-z0-9A-Z]) with dashes (-), \" \"underscores ( _ ), dots (.),", "be in the form {org_id}/{short_name} where short_name \" \"must be 1-63 characters, beginning", "\"\"\" parser.add_argument( \"--description\", metavar=\"DESCRIPTION\", help=(\"User-assigned description of the TagKey or TagValue. \" \"Must", "\"\"\" parser.add_argument( \"--parent\", metavar=\"PARENT\", required=required, help=message if message else (\"Parent of the resource.\"))", "argparse parser. \"\"\" parser.add_argument( \"POLICY_FILE\", metavar=\"POLICY_FILE\", help=( \"Path to a local JSON or", "An argparse parser. \"\"\" parser.add_argument( \"POLICY_FILE\", metavar=\"POLICY_FILE\", help=( \"Path to a local JSON", "\" \"underscores (_), dots (.), and alphanumerics between.\")) def AddLocationArgToParser(parser, message): \"\"\"Adds argument", "Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "in compliance with the License. # You may obtain a copy of the", "\" \"system that corresponds to the purpose.\")) def AddPurposeDataArgToParser(parser): \"\"\"Adds argument for the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "purpose.\")) def AddAsyncArgToParser(parser): \"\"\"Adds async flag to the parser. Args: parser: ArgumentInterceptor, An", "from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "argparse parser. \"\"\" parser.add_argument( \"--force\", action=\"store_true\", help=(\"Force argument to bypass checks.\")) def AddPolicyFileArgToParser(parser):", "parser. 
Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--purpose\", metavar=\"PURPOSE\", choices=[\"GCE_FIREWALL\"], help=(\"Purpose", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "\"POLICY_FILE\", metavar=\"POLICY_FILE\", help=( \"Path to a local JSON or YAML formatted file containing", "# Copyright 2019 Google LLC. All Rights Reserved. # # Licensed under the", "the purpose.\")) def AddPurposeDataArgToParser(parser): \"\"\"Adds argument for the TagKey's purpose data to the", "command is a valid \" \"file, as is any JSON or YAML file", "form tagValues/{numeric_id}. The namespaced name \" \"should be in the form {org_id}/{tag_key_short_name}/{short_name} \"", "short_name \" \"must be 1-63 characters, beginning and ending with an \" \"alphanumeric", "See the License for the specific language governing permissions and # limitations under", "to the parser. Args: parser: ArgumentInterceptor, An argparse parser. required: Boolean, to enforce", "TagKey's or TagValue's description to the parser. Args: parser: ArgumentInterceptor, An argparse parser.", "to enforce --parent as a required flag. message: String, replacement help text for", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "TagKey that can only be set on creation. \" \"Specifying this field adds", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "between.\")) def AddForceArgToParser(parser): \"\"\"Adds force argument to the parser. Args: parser: ArgumentInterceptor, An", "\"\"\"Adds argument for the location. Args: parser: ArgumentInterceptor, An argparse parser. 
message: String,", "from __future__ import unicode_literals from googlecloudsdk.calliope import arg_parsers from googlecloudsdk.calliope import base def", "not exceed 256 characters.\")) def AddPurposeArgToParser(parser): \"\"\"Adds argument for the TagKey's purpose to", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "an \" \"alphanumeric character ([a-z0-9A-Z]) with dashes (-), \" \"underscores ( _ ),", "(-), \" \"underscores (_), dots (.), and alphanumerics between.\")) def AddLocationArgToParser(parser, message): \"\"\"Adds", "adds additional validation from the policy \" \"system that corresponds to the purpose.\"))", "AddPurposeDataArgToParser(parser): \"\"\"Adds argument for the TagKey's purpose data to the parser. Args: parser:", "\" \"must be 1-63 characters, beginning and ending with an \" \"alphanumeric character", "name or namespaced name. The name should \" \"be in the form tagValues/{numeric_id}.", "or TagValue. \" \"Must not exceed 256 characters.\")) def AddPurposeArgToParser(parser): \"\"\"Adds argument for", "to the purpose.\")) def AddPurposeDataArgToParser(parser): \"\"\"Adds argument for the TagKey's purpose data to", "description of the TagKey or TagValue. \" \"Must not exceed 256 characters.\")) def", "parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--tag-value\", metavar=\"TAG_VALUE\", required=True, help=(\"Tag value name", "\"--purpose\", metavar=\"PURPOSE\", choices=[\"GCE_FIREWALL\"], help=(\"Purpose specifier of the TagKey that can only be set", "required=True, help=(\"Tag value name or namespaced name. The name should \" \"be in", "parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--purpose\", metavar=\"PURPOSE\", choices=[\"GCE_FIREWALL\"], help=(\"Purpose specifier of", "resource name to the parser. Args: parser: ArgumentInterceptor, An argparse parser. 
\"\"\" parser.add_argument(", "\"\"\"Adds argument for the local Policy file to set. Args: parser: ArgumentInterceptor, An", "validated by the policy system that corresponds\" \" to the purpose.\")) def AddAsyncArgToParser(parser):", "an \" \"alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores \" \"( _ ),", "parser. \"\"\" parser.add_argument( \"--description\", metavar=\"DESCRIPTION\", help=(\"User-assigned description of the TagKey or TagValue. \"", "be set on creation. \" \"This data is validated by the policy system", "the TagKey or TagValue. \" \"Must not exceed 256 characters.\")) def AddPurposeArgToParser(parser): \"\"\"Adds", "ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"POLICY_FILE\", metavar=\"POLICY_FILE\", help=( \"Path to a local", "Version 2.0 (the \"License\"); # you may not use this file except in", "message: String, replacement help text for flag. \"\"\" parser.add_argument( \"--parent\", metavar=\"PARENT\", required=required, help=message", "\"file, as is any JSON or YAML file conforming to the structure of", "except in compliance with the License. # You may obtain a copy of", "parser: ArgumentInterceptor, An argparse parser. required: Boolean, to enforce --parent as a required", "additional validation from the policy \" \"system that corresponds to the purpose.\")) def", "alphanumerics between.\")) def AddLocationArgToParser(parser, message): \"\"\"Adds argument for the location. Args: parser: ArgumentInterceptor,", "as is any JSON or YAML file conforming to the structure of \"", "\"RESOURCE_NAME\", metavar=\"RESOURCE_NAME\", help=(\"Resource name or namespaced name. The resource name should \" \"be", "the form {org_id}/{tag_key_short_name}/{short_name} \" \"where short_name must be 1-63 characters, beginning and ending", "the TagKey's or TagValue's description to the parser. 
Args: parser: ArgumentInterceptor, An argparse", "([a-z0-9A-Z]) with dashes (-), \" \"underscores (_), dots (.), and alphanumerics between.\")) def", "\" \"where short_name must be 1-63 characters, beginning and ending \" \"with an", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "a required flag. message: String, replacement help text for flag. \"\"\" parser.add_argument( \"--parent\",", "purpose.\")) def AddPurposeDataArgToParser(parser): \"\"\"Adds argument for the TagKey's purpose data to the parser.", "argument to bypass checks.\")) def AddPolicyFileArgToParser(parser): \"\"\"Adds argument for the local Policy file", "\")) def AddParentArgToParser(parser, required=True, message=\"\"): \"\"\"Adds argument for the TagKey or TagValue's parent", "The namespaced name \" \"should be in the form {org_id}/{short_name} where short_name \"", "dashes (-), \" \"underscores ( _ ), dots (.), and alphanumerics between. \"))", "local Policy file to set. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument(", "formatted file containing a valid \" \"policy. The output of the `get-iam-policy` command", "parser.add_argument( \"--purpose-data\", type=arg_parsers.ArgDict( spec={\"network\": str}, max_length=1, ), help=(\"Purpose data of the TagKey that", "Args: parser: ArgumentInterceptor, An argparse parser. 
\"\"\" parser.add_argument( \"RESOURCE_NAME\", metavar=\"RESOURCE_NAME\", help=(\"Resource name or", "CRM Tag arguments on a parser.\"\"\" from __future__ import absolute_import from __future__ import", "a parser.\"\"\" from __future__ import absolute_import from __future__ import division from __future__ import", "AddResourceNameArgToParser(parser): \"\"\"Adds resource name argument for the namespaced name or resource name to", "policy system that corresponds\" \" to the purpose.\")) def AddAsyncArgToParser(parser): \"\"\"Adds async flag", "Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"POLICY_FILE\", metavar=\"POLICY_FILE\", help=( \"Path to", "2019 Google LLC. All Rights Reserved. # # Licensed under the Apache License,", "data of the TagKey that can only be set on creation. \" \"This", "Args: parser: ArgumentInterceptor, an argparse parser. \"\"\" parser.add_argument( \"short_name\", metavar=\"SHORT_NAME\", help=(\"User specified, friendly", "that can only be set on creation. \" \"Specifying this field adds additional", "be 1-63 characters, beginning and ending \" \"with an alphanumeric character ([a-z0-9A-Z]) with", "(_), dots (.), and alphanumerics between.\")) def AddLocationArgToParser(parser, message): \"\"\"Adds argument for the", "argparse parser. \"\"\" parser.add_argument( \"--purpose-data\", type=arg_parsers.ArgDict( spec={\"network\": str}, max_length=1, ), help=(\"Purpose data of", "parser.add_argument( \"--tag-value\", metavar=\"TAG_VALUE\", required=True, help=(\"Tag value name or namespaced name. The name should", "name or resource name to the parser. Args: parser: ArgumentInterceptor, An argparse parser.", "help=(\"Purpose data of the TagKey that can only be set on creation. 
\"", "policy \" \"system that corresponds to the purpose.\")) def AddPurposeDataArgToParser(parser): \"\"\"Adds argument for", "arguments on a parser.\"\"\" from __future__ import absolute_import from __future__ import division from", "file containing a valid \" \"policy. The output of the `get-iam-policy` command is", "under the License. \"\"\"Utilities for defining CRM Tag arguments on a parser.\"\"\" from", "absolute_import from __future__ import division from __future__ import unicode_literals from googlecloudsdk.calliope import arg_parsers", "String, replacement help text for flag. \"\"\" parser.add_argument( \"--parent\", metavar=\"PARENT\", required=required, help=message if", "character ([a-z0-9A-Z]) with dashes (-), underscores \" \"( _ ), dots (.), and", "AddPolicyFileArgToParser(parser): \"\"\"Adds argument for the local Policy file to set. Args: parser: ArgumentInterceptor,", "file conforming to the structure of \" \"a [Policy](https://cloud.google.com/iam/reference/rest/v1/Policy).\")) def AddTagValueArgToParser(parser): \"\"\"Adds the", "alphanumeric character ([a-z0-9A-Z]) with dashes (-), \" \"underscores (_), dots (.), and alphanumerics", "parser.add_argument( \"--purpose\", metavar=\"PURPOSE\", choices=[\"GCE_FIREWALL\"], help=(\"Purpose specifier of the TagKey that can only be", "help=message if message else (\"Parent of the resource.\")) def AddDescriptionArgToParser(parser): \"\"\"Adds argument for", "only be set on creation. \" \"Specifying this field adds additional validation from", "metavar=\"TAG_VALUE\", required=True, help=(\"Tag value name or namespaced name. The name should \" \"be", "Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--description\", metavar=\"DESCRIPTION\", help=(\"User-assigned description of", "\" to the purpose.\")) def AddAsyncArgToParser(parser): \"\"\"Adds async flag to the parser. Args:", "the namespaced name or resource name to the parser. 
Args: parser: ArgumentInterceptor, An", "characters.\")) def AddPurposeArgToParser(parser): \"\"\"Adds argument for the TagKey's purpose to the parser. Args:", "specifier of the TagKey that can only be set on creation. \" \"Specifying", "exceed 256 characters.\")) def AddPurposeArgToParser(parser): \"\"\"Adds argument for the TagKey's purpose to the", "ending with an \" \"alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores \" \"(", "), dots (.), and alphanumerics between. \")) def AddParentArgToParser(parser, required=True, message=\"\"): \"\"\"Adds argument", "to the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\" parser.add_argument( \"--description\", metavar=\"DESCRIPTION\",", "beginning and ending with an \" \"alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores", "help text for flag. \"\"\" parser.add_argument( \"--parent\", metavar=\"PARENT\", required=required, help=message if message else", "AddPurposeArgToParser(parser): \"\"\"Adds argument for the TagKey's purpose to the parser. Args: parser: ArgumentInterceptor,", "for the TagKey's purpose data to the parser. Args: parser: ArgumentInterceptor, An argparse", "\"\"\"Adds force argument to the parser. Args: parser: ArgumentInterceptor, An argparse parser. \"\"\"", "def AddResourceNameArgToParser(parser): \"\"\"Adds resource name argument for the namespaced name or resource name", "if message else (\"Parent of the resource.\")) def AddDescriptionArgToParser(parser): \"\"\"Adds argument for the", "argument for the TagKey's purpose to the parser. Args: parser: ArgumentInterceptor, An argparse", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "on a parser.\"\"\" from __future__ import absolute_import from __future__ import division from __future__", "\" \"be in the form {resource_type}/{numeric_id}. The namespaced name \" \"should be in", "or namespaced name. The name should \" \"be in the form tagValues/{numeric_id}. 
The", "location. Args: parser: ArgumentInterceptor, An argparse parser. message: String, help text for flag.", "def AddLocationArgToParser(parser, message): \"\"\"Adds argument for the location. Args: parser: ArgumentInterceptor, An argparse" ]
[ "<reponame>jeevb/awsm from awsm.validators import yaml_dict from voluptuous import All, Coerce, Schema HOOK_SCHEMA =", "[str], 'tasks': [dict] })) HOOK_VARS_SCHEMA = All(Coerce(yaml_dict), dict) HOOKS_CFG_SCHEMA = Schema({ 'vars': HOOK_VARS_SCHEMA,", "voluptuous import All, Coerce, Schema HOOK_SCHEMA = Schema(All(Coerce(yaml_dict), { 'includes': [str], 'tasks': [dict]", "[dict] })) HOOK_VARS_SCHEMA = All(Coerce(yaml_dict), dict) HOOKS_CFG_SCHEMA = Schema({ 'vars': HOOK_VARS_SCHEMA, str: HOOK_SCHEMA", "{ 'includes': [str], 'tasks': [dict] })) HOOK_VARS_SCHEMA = All(Coerce(yaml_dict), dict) HOOKS_CFG_SCHEMA = Schema({", "Coerce, Schema HOOK_SCHEMA = Schema(All(Coerce(yaml_dict), { 'includes': [str], 'tasks': [dict] })) HOOK_VARS_SCHEMA =", "from awsm.validators import yaml_dict from voluptuous import All, Coerce, Schema HOOK_SCHEMA = Schema(All(Coerce(yaml_dict),", "})) HOOK_VARS_SCHEMA = All(Coerce(yaml_dict), dict) HOOKS_CFG_SCHEMA = Schema({ 'vars': HOOK_VARS_SCHEMA, str: HOOK_SCHEMA })", "from voluptuous import All, Coerce, Schema HOOK_SCHEMA = Schema(All(Coerce(yaml_dict), { 'includes': [str], 'tasks':", "awsm.validators import yaml_dict from voluptuous import All, Coerce, Schema HOOK_SCHEMA = Schema(All(Coerce(yaml_dict), {", "HOOK_SCHEMA = Schema(All(Coerce(yaml_dict), { 'includes': [str], 'tasks': [dict] })) HOOK_VARS_SCHEMA = All(Coerce(yaml_dict), dict)", "import All, Coerce, Schema HOOK_SCHEMA = Schema(All(Coerce(yaml_dict), { 'includes': [str], 'tasks': [dict] }))", "All, Coerce, Schema HOOK_SCHEMA = Schema(All(Coerce(yaml_dict), { 'includes': [str], 'tasks': [dict] })) HOOK_VARS_SCHEMA", "yaml_dict from voluptuous import All, Coerce, Schema HOOK_SCHEMA = Schema(All(Coerce(yaml_dict), { 'includes': [str],", "'includes': [str], 'tasks': [dict] })) HOOK_VARS_SCHEMA = All(Coerce(yaml_dict), dict) HOOKS_CFG_SCHEMA = Schema({ 'vars':", "import yaml_dict from voluptuous import All, Coerce, Schema HOOK_SCHEMA = Schema(All(Coerce(yaml_dict), { 'includes':", "= 
Schema(All(Coerce(yaml_dict), { 'includes': [str], 'tasks': [dict] })) HOOK_VARS_SCHEMA = All(Coerce(yaml_dict), dict) HOOKS_CFG_SCHEMA", "Schema HOOK_SCHEMA = Schema(All(Coerce(yaml_dict), { 'includes': [str], 'tasks': [dict] })) HOOK_VARS_SCHEMA = All(Coerce(yaml_dict),", "'tasks': [dict] })) HOOK_VARS_SCHEMA = All(Coerce(yaml_dict), dict) HOOKS_CFG_SCHEMA = Schema({ 'vars': HOOK_VARS_SCHEMA, str:", "Schema(All(Coerce(yaml_dict), { 'includes': [str], 'tasks': [dict] })) HOOK_VARS_SCHEMA = All(Coerce(yaml_dict), dict) HOOKS_CFG_SCHEMA =" ]
[ "response = jsonify( { 'error': str(error) } ) response.status_code = 401 # Don't", "defined in /instance/flask.cfg application = create_app('flask.cfg') @application.errorhandler(Exception) def handle_invalid_usage(error): response = jsonify( {", ") response.status_code = 401 # Don't do it this way, just for an", "import Logger from wordservice import create_app # Call the Application Factory function to", "jsonify from pyawsstarter import Logger from wordservice import create_app # Call the Application", "it this way, just for an example return response if __name__ == '__main__':", "application instance # using the standard configuration defined in /instance/flask.cfg application = create_app('flask.cfg')", "} ) response.status_code = 401 # Don't do it this way, just for", "Don't do it this way, just for an example return response if __name__", "the standard configuration defined in /instance/flask.cfg application = create_app('flask.cfg') @application.errorhandler(Exception) def handle_invalid_usage(error): response", "create_app('flask.cfg') @application.errorhandler(Exception) def handle_invalid_usage(error): response = jsonify( { 'error': str(error) } ) response.status_code", "Logger from wordservice import create_app # Call the Application Factory function to construct", "to construct a Flask application instance # using the standard configuration defined in", "import create_app # Call the Application Factory function to construct a Flask application", "the Application Factory function to construct a Flask application instance # using the", "for an example return response if __name__ == '__main__': Logger.get_logger('wordservice').info('Starting wordservice') application.run(host='0.0.0.0', port=8080)", "construct a Flask application instance # using the standard configuration defined in /instance/flask.cfg", "create_app # Call the Application Factory function to construct a Flask application instance", "# using the standard configuration defined in 
/instance/flask.cfg application = create_app('flask.cfg') @application.errorhandler(Exception) def", "def handle_invalid_usage(error): response = jsonify( { 'error': str(error) } ) response.status_code = 401", "{ 'error': str(error) } ) response.status_code = 401 # Don't do it this", "import jsonify from pyawsstarter import Logger from wordservice import create_app # Call the", "# Don't do it this way, just for an example return response if", "= 401 # Don't do it this way, just for an example return", "configuration defined in /instance/flask.cfg application = create_app('flask.cfg') @application.errorhandler(Exception) def handle_invalid_usage(error): response = jsonify(", "a Flask application instance # using the standard configuration defined in /instance/flask.cfg application", "wordservice import create_app # Call the Application Factory function to construct a Flask", "= jsonify( { 'error': str(error) } ) response.status_code = 401 # Don't do", "application = create_app('flask.cfg') @application.errorhandler(Exception) def handle_invalid_usage(error): response = jsonify( { 'error': str(error) }", "flask import jsonify from pyawsstarter import Logger from wordservice import create_app # Call", "using the standard configuration defined in /instance/flask.cfg application = create_app('flask.cfg') @application.errorhandler(Exception) def handle_invalid_usage(error):", "str(error) } ) response.status_code = 401 # Don't do it this way, just", "401 # Don't do it this way, just for an example return response", "response.status_code = 401 # Don't do it this way, just for an example", "/instance/flask.cfg application = create_app('flask.cfg') @application.errorhandler(Exception) def handle_invalid_usage(error): response = jsonify( { 'error': str(error)", "function to construct a Flask application instance # using the standard configuration defined", "standard configuration defined in /instance/flask.cfg application = create_app('flask.cfg') 
@application.errorhandler(Exception) def handle_invalid_usage(error): response =", "Call the Application Factory function to construct a Flask application instance # using", "this way, just for an example return response if __name__ == '__main__': Logger.get_logger('wordservice').info('Starting", "way, just for an example return response if __name__ == '__main__': Logger.get_logger('wordservice').info('Starting wordservice')", "Flask application instance # using the standard configuration defined in /instance/flask.cfg application =", "@application.errorhandler(Exception) def handle_invalid_usage(error): response = jsonify( { 'error': str(error) } ) response.status_code =", "= create_app('flask.cfg') @application.errorhandler(Exception) def handle_invalid_usage(error): response = jsonify( { 'error': str(error) } )", "from pyawsstarter import Logger from wordservice import create_app # Call the Application Factory", "just for an example return response if __name__ == '__main__': Logger.get_logger('wordservice').info('Starting wordservice') application.run(host='0.0.0.0',", "Application Factory function to construct a Flask application instance # using the standard", "pyawsstarter import Logger from wordservice import create_app # Call the Application Factory function", "jsonify( { 'error': str(error) } ) response.status_code = 401 # Don't do it", "'error': str(error) } ) response.status_code = 401 # Don't do it this way,", "handle_invalid_usage(error): response = jsonify( { 'error': str(error) } ) response.status_code = 401 #", "from wordservice import create_app # Call the Application Factory function to construct a", "do it this way, just for an example return response if __name__ ==", "# Call the Application Factory function to construct a Flask application instance #", "instance # using the standard configuration defined in /instance/flask.cfg application = create_app('flask.cfg') @application.errorhandler(Exception)", "from flask import jsonify from 
pyawsstarter import Logger from wordservice import create_app #", "in /instance/flask.cfg application = create_app('flask.cfg') @application.errorhandler(Exception) def handle_invalid_usage(error): response = jsonify( { 'error':", "Factory function to construct a Flask application instance # using the standard configuration" ]
[ "import views admin.autodiscover() urlpatterns = [ url(r'^pass_forget/$', views.pass_forget, name='pass_forget'), # url(r'^pass_rec/$', views.pass_rec, name='pass_rec'),", "django.conf.urls import url from django.contrib import admin import views admin.autodiscover() urlpatterns = [", "from django.contrib import admin import views admin.autodiscover() urlpatterns = [ url(r'^pass_forget/$', views.pass_forget, name='pass_forget'),", "import admin import views admin.autodiscover() urlpatterns = [ url(r'^pass_forget/$', views.pass_forget, name='pass_forget'), # url(r'^pass_rec/$',", "django.contrib import admin import views admin.autodiscover() urlpatterns = [ url(r'^pass_forget/$', views.pass_forget, name='pass_forget'), #", "views admin.autodiscover() urlpatterns = [ url(r'^pass_forget/$', views.pass_forget, name='pass_forget'), # url(r'^pass_rec/$', views.pass_rec, name='pass_rec'), ]", "url from django.contrib import admin import views admin.autodiscover() urlpatterns = [ url(r'^pass_forget/$', views.pass_forget,", "admin import views admin.autodiscover() urlpatterns = [ url(r'^pass_forget/$', views.pass_forget, name='pass_forget'), # url(r'^pass_rec/$', views.pass_rec,", "from django.conf.urls import url from django.contrib import admin import views admin.autodiscover() urlpatterns =", "import url from django.contrib import admin import views admin.autodiscover() urlpatterns = [ url(r'^pass_forget/$'," ]
[ "the stores api given an id\"\"\" payload = {\"query\": f\"storeId={store_id}\", \"params\": kwargs} return", "BestBuyStoresAPI(BestBuyCore): def _api_name(self): return STORES_API # ================================= # Search by store by name", "store by name or id # ================================= def search_by_id(self, store_id, **kwargs): \"\"\"Searches the", "or id # ================================= def search_by_id(self, store_id, **kwargs): \"\"\"Searches the stores api given", "\"\"\"Searches the stores api given an id\"\"\" payload = {\"query\": f\"storeId={store_id}\", \"params\": kwargs}", "BestBuyCore from ..constants import STORES_API from ..utils.exceptions import BestBuyStoresAPIError class BestBuyStoresAPI(BestBuyCore): def _api_name(self):", "# Search by store by name or id # ================================= def search_by_id(self, store_id,", "================================= def search_by_id(self, store_id, **kwargs): \"\"\"Searches the stores api given an id\"\"\" payload", "..constants import STORES_API from ..utils.exceptions import BestBuyStoresAPIError class BestBuyStoresAPI(BestBuyCore): def _api_name(self): return STORES_API", "================================= # Search by store by name or id # ================================= def search_by_id(self,", "by store by name or id # ================================= def search_by_id(self, store_id, **kwargs): \"\"\"Searches", "stores api given an id\"\"\" payload = {\"query\": f\"storeId={store_id}\", \"params\": kwargs} return self._call(payload)", "BestBuyStoresAPIError class BestBuyStoresAPI(BestBuyCore): def _api_name(self): return STORES_API # ================================= # Search by store", "**kwargs): \"\"\"Searches the stores api given an id\"\"\" payload = {\"query\": f\"storeId={store_id}\", \"params\":", "..utils.exceptions import BestBuyStoresAPIError class BestBuyStoresAPI(BestBuyCore): def _api_name(self): return STORES_API # ================================= # Search", 
"Search by store by name or id # ================================= def search_by_id(self, store_id, **kwargs):", "search_by_id(self, store_id, **kwargs): \"\"\"Searches the stores api given an id\"\"\" payload = {\"query\":", "class BestBuyStoresAPI(BestBuyCore): def _api_name(self): return STORES_API # ================================= # Search by store by", "STORES_API from ..utils.exceptions import BestBuyStoresAPIError class BestBuyStoresAPI(BestBuyCore): def _api_name(self): return STORES_API # =================================", "store_id, **kwargs): \"\"\"Searches the stores api given an id\"\"\" payload = {\"query\": f\"storeId={store_id}\",", "def _api_name(self): return STORES_API # ================================= # Search by store by name or", "import STORES_API from ..utils.exceptions import BestBuyStoresAPIError class BestBuyStoresAPI(BestBuyCore): def _api_name(self): return STORES_API #", "name or id # ================================= def search_by_id(self, store_id, **kwargs): \"\"\"Searches the stores api", "# ================================= def search_by_id(self, store_id, **kwargs): \"\"\"Searches the stores api given an id\"\"\"", "def search_by_id(self, store_id, **kwargs): \"\"\"Searches the stores api given an id\"\"\" payload =", "from ..utils.exceptions import BestBuyStoresAPIError class BestBuyStoresAPI(BestBuyCore): def _api_name(self): return STORES_API # ================================= #", "..api.base import BestBuyCore from ..constants import STORES_API from ..utils.exceptions import BestBuyStoresAPIError class BestBuyStoresAPI(BestBuyCore):", "_api_name(self): return STORES_API # ================================= # Search by store by name or id", "from ..constants import STORES_API from ..utils.exceptions import BestBuyStoresAPIError class BestBuyStoresAPI(BestBuyCore): def _api_name(self): return", "return STORES_API # ================================= # Search by store by name or id #", "id # ================================= 
def search_by_id(self, store_id, **kwargs): \"\"\"Searches the stores api given an", "from ..api.base import BestBuyCore from ..constants import STORES_API from ..utils.exceptions import BestBuyStoresAPIError class", "by name or id # ================================= def search_by_id(self, store_id, **kwargs): \"\"\"Searches the stores", "# ================================= # Search by store by name or id # ================================= def", "import BestBuyCore from ..constants import STORES_API from ..utils.exceptions import BestBuyStoresAPIError class BestBuyStoresAPI(BestBuyCore): def", "import BestBuyStoresAPIError class BestBuyStoresAPI(BestBuyCore): def _api_name(self): return STORES_API # ================================= # Search by", "STORES_API # ================================= # Search by store by name or id # =================================" ]
[ "i in range(1, len(prices)): maxCur = max(0, maxCur + prices[i] - prices[i-1]) maxSoFar", "4] Output: 5 Input: [7, 6, 4, 3, 1] Output: 0 ''' class", "= 0 maxSoFar = 0 for i in range(1, len(prices)): maxCur = max(0,", "day i. If you were only permitted to complete at most one transaction", "array for which the ith element is the price of a given stock", "given stock on day i. If you were only permitted to complete at", "you were only permitted to complete at most one transaction (ie, buy one", "and sell one share of the stock), design an algorithm to find the", "1] Output: 0 ''' class Solution(object): def maxProfit(self, prices): \"\"\" :type prices: List[int]", "on day i. If you were only permitted to complete at most one", "= min(min_price, price) profit = price - min_price max_profit = max(max_profit, profit) return", "you have an array for which the ith element is the price of", "the price of a given stock on day i. If you were only", "design an algorithm to find the maximum profit. 
Input: [7, 1, 5, 3,", "\"\"\" :type prices: List[int] :rtype: int \"\"\" max_profit, min_price = 0, float('inf') for", "1, 5, 3, 6, 4] Output: 5 Input: [7, 6, 4, 3, 1]", "min_price = 0, float('inf') for price in prices: min_price = min(min_price, price) profit", "0, float('inf') for price in prices: min_price = min(min_price, price) profit = price", "Solution(object): def maxProfit(self, prices): \"\"\" :type prices: List[int] :rtype: int \"\"\" max_profit, min_price", "return maxSoFar if __name__=='__main__': s = Solution() transaction1 = [7, 1, 5, 3,", "= [7, 1, 5, 3, 6, 4] transaction2 = [7, 6, 4, 3,", "Say you have an array for which the ith element is the price", "If you were only permitted to complete at most one transaction (ie, buy", "\"\"\" max_profit, min_price = 0, float('inf') for price in prices: min_price = min(min_price,", "to complete at most one transaction (ie, buy one and sell one share", "max_profit, min_price = 0, float('inf') for price in prices: min_price = min(min_price, price)", "''' Kadane's algorithm. ''' maxCur = 0 maxSoFar = 0 for i in", "t3 = [1, 7, 4, 11] t4 = [0, 6, -3, 7] print", "min_price max_profit = max(max_profit, profit) return max_profit def maxProfit2(self, prices): ''' Kadane's algorithm.", "share of the stock), design an algorithm to find the maximum profit. Input:", "s = Solution() transaction1 = [7, 1, 5, 3, 6, 4] transaction2 =", "profit = price - min_price max_profit = max(max_profit, profit) return max_profit def maxProfit2(self,", "find the maximum profit. Input: [7, 1, 5, 3, 6, 4] Output: 5", "an array for which the ith element is the price of a given", "Output: 5 Input: [7, 6, 4, 3, 1] Output: 0 ''' class Solution(object):", "3, 1] Output: 0 ''' class Solution(object): def maxProfit(self, prices): \"\"\" :type prices:", "is the price of a given stock on day i. 
If you were", "prices[i-1]) maxSoFar = max(maxCur, maxSoFar) return maxSoFar if __name__=='__main__': s = Solution() transaction1", "if __name__=='__main__': s = Solution() transaction1 = [7, 1, 5, 3, 6, 4]", "[1, 7, 4, 11] t4 = [0, 6, -3, 7] print (s.maxProfit2(t3)) print", "= price - min_price max_profit = max(max_profit, profit) return max_profit def maxProfit2(self, prices):", "7, 4, 11] t4 = [0, 6, -3, 7] print (s.maxProfit2(t3)) print (s.maxProfit2(t4))", "5, 3, 6, 4] Output: 5 Input: [7, 6, 4, 3, 1] Output:", "one transaction (ie, buy one and sell one share of the stock), design", "prices[i] - prices[i-1]) maxSoFar = max(maxCur, maxSoFar) return maxSoFar if __name__=='__main__': s =", "max(max_profit, profit) return max_profit def maxProfit2(self, prices): ''' Kadane's algorithm. ''' maxCur =", "[7, 1, 5, 3, 6, 4] Output: 5 Input: [7, 6, 4, 3,", "for price in prices: min_price = min(min_price, price) profit = price - min_price", "= max(max_profit, profit) return max_profit def maxProfit2(self, prices): ''' Kadane's algorithm. ''' maxCur", ":rtype: int \"\"\" max_profit, min_price = 0, float('inf') for price in prices: min_price", "prices): ''' Kadane's algorithm. ''' maxCur = 0 maxSoFar = 0 for i", "permitted to complete at most one transaction (ie, buy one and sell one", "maxCur = max(0, maxCur + prices[i] - prices[i-1]) maxSoFar = max(maxCur, maxSoFar) return", "1] print(s.maxProfit(transaction1)) print(s.maxProfit(transaction2)) print(s.maxProfit2(transaction1)) print(s.maxProfit2(transaction2)) t3 = [1, 7, 4, 11] t4 =", "were only permitted to complete at most one transaction (ie, buy one and", "6, 4] Output: 5 Input: [7, 6, 4, 3, 1] Output: 0 '''", "sell one share of the stock), design an algorithm to find the maximum", "the maximum profit. 
Input: [7, 1, 5, 3, 6, 4] Output: 5 Input:", "[7, 6, 4, 3, 1] print(s.maxProfit(transaction1)) print(s.maxProfit(transaction2)) print(s.maxProfit2(transaction1)) print(s.maxProfit2(transaction2)) t3 = [1, 7,", "print(s.maxProfit(transaction1)) print(s.maxProfit(transaction2)) print(s.maxProfit2(transaction1)) print(s.maxProfit2(transaction2)) t3 = [1, 7, 4, 11] t4 = [0,", "profit. Input: [7, 1, 5, 3, 6, 4] Output: 5 Input: [7, 6,", "prices): \"\"\" :type prices: List[int] :rtype: int \"\"\" max_profit, min_price = 0, float('inf')", "most one transaction (ie, buy one and sell one share of the stock),", "4, 3, 1] Output: 0 ''' class Solution(object): def maxProfit(self, prices): \"\"\" :type", "4] transaction2 = [7, 6, 4, 3, 1] print(s.maxProfit(transaction1)) print(s.maxProfit(transaction2)) print(s.maxProfit2(transaction1)) print(s.maxProfit2(transaction2)) t3", "at most one transaction (ie, buy one and sell one share of the", "in range(1, len(prices)): maxCur = max(0, maxCur + prices[i] - prices[i-1]) maxSoFar =", "price of a given stock on day i. If you were only permitted", "buy one and sell one share of the stock), design an algorithm to", "+ prices[i] - prices[i-1]) maxSoFar = max(maxCur, maxSoFar) return maxSoFar if __name__=='__main__': s", "= max(0, maxCur + prices[i] - prices[i-1]) maxSoFar = max(maxCur, maxSoFar) return maxSoFar", "print(s.maxProfit2(transaction1)) print(s.maxProfit2(transaction2)) t3 = [1, 7, 4, 11] t4 = [0, 6, -3,", "transaction2 = [7, 6, 4, 3, 1] print(s.maxProfit(transaction1)) print(s.maxProfit(transaction2)) print(s.maxProfit2(transaction1)) print(s.maxProfit2(transaction2)) t3 =", "max_profit def maxProfit2(self, prices): ''' Kadane's algorithm. ''' maxCur = 0 maxSoFar =", "ith element is the price of a given stock on day i. 
If", "price in prices: min_price = min(min_price, price) profit = price - min_price max_profit", "(ie, buy one and sell one share of the stock), design an algorithm", "3, 6, 4] transaction2 = [7, 6, 4, 3, 1] print(s.maxProfit(transaction1)) print(s.maxProfit(transaction2)) print(s.maxProfit2(transaction1))", "maxCur = 0 maxSoFar = 0 for i in range(1, len(prices)): maxCur =", "''' maxCur = 0 maxSoFar = 0 for i in range(1, len(prices)): maxCur", "of the stock), design an algorithm to find the maximum profit. Input: [7,", "one share of the stock), design an algorithm to find the maximum profit.", "min_price = min(min_price, price) profit = price - min_price max_profit = max(max_profit, profit)", "- prices[i-1]) maxSoFar = max(maxCur, maxSoFar) return maxSoFar if __name__=='__main__': s = Solution()", "def maxProfit(self, prices): \"\"\" :type prices: List[int] :rtype: int \"\"\" max_profit, min_price =", "an algorithm to find the maximum profit. Input: [7, 1, 5, 3, 6,", "maxSoFar = max(maxCur, maxSoFar) return maxSoFar if __name__=='__main__': s = Solution() transaction1 =", "for i in range(1, len(prices)): maxCur = max(0, maxCur + prices[i] - prices[i-1])", "stock), design an algorithm to find the maximum profit. Input: [7, 1, 5,", "in prices: min_price = min(min_price, price) profit = price - min_price max_profit =", "Kadane's algorithm. ''' maxCur = 0 maxSoFar = 0 for i in range(1,", "[7, 1, 5, 3, 6, 4] transaction2 = [7, 6, 4, 3, 1]", "= 0 for i in range(1, len(prices)): maxCur = max(0, maxCur + prices[i]", "3, 1] print(s.maxProfit(transaction1)) print(s.maxProfit(transaction2)) print(s.maxProfit2(transaction1)) print(s.maxProfit2(transaction2)) t3 = [1, 7, 4, 11] t4", "algorithm. ''' maxCur = 0 maxSoFar = 0 for i in range(1, len(prices)):", "i. 
If you were only permitted to complete at most one transaction (ie,", "maxSoFar = 0 for i in range(1, len(prices)): maxCur = max(0, maxCur +", "= 0, float('inf') for price in prices: min_price = min(min_price, price) profit =", "float('inf') for price in prices: min_price = min(min_price, price) profit = price -", "maxProfit(self, prices): \"\"\" :type prices: List[int] :rtype: int \"\"\" max_profit, min_price = 0,", "0 maxSoFar = 0 for i in range(1, len(prices)): maxCur = max(0, maxCur", "prices: List[int] :rtype: int \"\"\" max_profit, min_price = 0, float('inf') for price in", "min(min_price, price) profit = price - min_price max_profit = max(max_profit, profit) return max_profit", "maxProfit2(self, prices): ''' Kadane's algorithm. ''' maxCur = 0 maxSoFar = 0 for", "the ith element is the price of a given stock on day i.", "6, 4] transaction2 = [7, 6, 4, 3, 1] print(s.maxProfit(transaction1)) print(s.maxProfit(transaction2)) print(s.maxProfit2(transaction1)) print(s.maxProfit2(transaction2))", "5, 3, 6, 4] transaction2 = [7, 6, 4, 3, 1] print(s.maxProfit(transaction1)) print(s.maxProfit(transaction2))", "prices: min_price = min(min_price, price) profit = price - min_price max_profit = max(max_profit,", "range(1, len(prices)): maxCur = max(0, maxCur + prices[i] - prices[i-1]) maxSoFar = max(maxCur,", "[7, 6, 4, 3, 1] Output: 0 ''' class Solution(object): def maxProfit(self, prices):", "Input: [7, 6, 4, 3, 1] Output: 0 ''' class Solution(object): def maxProfit(self,", "a given stock on day i. 
If you were only permitted to complete", "for which the ith element is the price of a given stock on", "Input: [7, 1, 5, 3, 6, 4] Output: 5 Input: [7, 6, 4,", "max(maxCur, maxSoFar) return maxSoFar if __name__=='__main__': s = Solution() transaction1 = [7, 1,", "5 Input: [7, 6, 4, 3, 1] Output: 0 ''' class Solution(object): def", "= [1, 7, 4, 11] t4 = [0, 6, -3, 7] print (s.maxProfit2(t3))", "6, 4, 3, 1] Output: 0 ''' class Solution(object): def maxProfit(self, prices): \"\"\"", "= Solution() transaction1 = [7, 1, 5, 3, 6, 4] transaction2 = [7,", "Output: 0 ''' class Solution(object): def maxProfit(self, prices): \"\"\" :type prices: List[int] :rtype:", "price) profit = price - min_price max_profit = max(max_profit, profit) return max_profit def", "which the ith element is the price of a given stock on day", "maxSoFar if __name__=='__main__': s = Solution() transaction1 = [7, 1, 5, 3, 6,", "0 for i in range(1, len(prices)): maxCur = max(0, maxCur + prices[i] -", "maximum profit. Input: [7, 1, 5, 3, 6, 4] Output: 5 Input: [7,", "max(0, maxCur + prices[i] - prices[i-1]) maxSoFar = max(maxCur, maxSoFar) return maxSoFar if", "price - min_price max_profit = max(max_profit, profit) return max_profit def maxProfit2(self, prices): '''", "one and sell one share of the stock), design an algorithm to find", "transaction (ie, buy one and sell one share of the stock), design an", "to find the maximum profit. Input: [7, 1, 5, 3, 6, 4] Output:", "have an array for which the ith element is the price of a", "element is the price of a given stock on day i. 
If you", "len(prices)): maxCur = max(0, maxCur + prices[i] - prices[i-1]) maxSoFar = max(maxCur, maxSoFar)", "1, 5, 3, 6, 4] transaction2 = [7, 6, 4, 3, 1] print(s.maxProfit(transaction1))", ":type prices: List[int] :rtype: int \"\"\" max_profit, min_price = 0, float('inf') for price", "- min_price max_profit = max(max_profit, profit) return max_profit def maxProfit2(self, prices): ''' Kadane's", "print(s.maxProfit2(transaction2)) t3 = [1, 7, 4, 11] t4 = [0, 6, -3, 7]", "6, 4, 3, 1] print(s.maxProfit(transaction1)) print(s.maxProfit(transaction2)) print(s.maxProfit2(transaction1)) print(s.maxProfit2(transaction2)) t3 = [1, 7, 4,", "print(s.maxProfit(transaction2)) print(s.maxProfit2(transaction1)) print(s.maxProfit2(transaction2)) t3 = [1, 7, 4, 11] t4 = [0, 6,", "complete at most one transaction (ie, buy one and sell one share of", "only permitted to complete at most one transaction (ie, buy one and sell", "''' Say you have an array for which the ith element is the", "__name__=='__main__': s = Solution() transaction1 = [7, 1, 5, 3, 6, 4] transaction2", "= max(maxCur, maxSoFar) return maxSoFar if __name__=='__main__': s = Solution() transaction1 = [7,", "= [7, 6, 4, 3, 1] print(s.maxProfit(transaction1)) print(s.maxProfit(transaction2)) print(s.maxProfit2(transaction1)) print(s.maxProfit2(transaction2)) t3 = [1,", "return max_profit def maxProfit2(self, prices): ''' Kadane's algorithm. ''' maxCur = 0 maxSoFar", "4, 3, 1] print(s.maxProfit(transaction1)) print(s.maxProfit(transaction2)) print(s.maxProfit2(transaction1)) print(s.maxProfit2(transaction2)) t3 = [1, 7, 4, 11]", "''' class Solution(object): def maxProfit(self, prices): \"\"\" :type prices: List[int] :rtype: int \"\"\"", "stock on day i. If you were only permitted to complete at most", "the stock), design an algorithm to find the maximum profit. 
Input: [7, 1,", "maxSoFar) return maxSoFar if __name__=='__main__': s = Solution() transaction1 = [7, 1, 5,", "def maxProfit2(self, prices): ''' Kadane's algorithm. ''' maxCur = 0 maxSoFar = 0", "0 ''' class Solution(object): def maxProfit(self, prices): \"\"\" :type prices: List[int] :rtype: int", "algorithm to find the maximum profit. Input: [7, 1, 5, 3, 6, 4]", "int \"\"\" max_profit, min_price = 0, float('inf') for price in prices: min_price =", "profit) return max_profit def maxProfit2(self, prices): ''' Kadane's algorithm. ''' maxCur = 0", "max_profit = max(max_profit, profit) return max_profit def maxProfit2(self, prices): ''' Kadane's algorithm. '''", "transaction1 = [7, 1, 5, 3, 6, 4] transaction2 = [7, 6, 4,", "Solution() transaction1 = [7, 1, 5, 3, 6, 4] transaction2 = [7, 6,", "3, 6, 4] Output: 5 Input: [7, 6, 4, 3, 1] Output: 0", "class Solution(object): def maxProfit(self, prices): \"\"\" :type prices: List[int] :rtype: int \"\"\" max_profit,", "of a given stock on day i. If you were only permitted to", "maxCur + prices[i] - prices[i-1]) maxSoFar = max(maxCur, maxSoFar) return maxSoFar if __name__=='__main__':", "List[int] :rtype: int \"\"\" max_profit, min_price = 0, float('inf') for price in prices:" ]
[ "= \"Global_active_power\" # synthetic series id ID = TsFreshEnum.ID GLOBAL_REACTIVE_POWER = \"Global_reactive_power\" GLOBAL_INTENSITY", "\"sort\" class DataMetadata(object): \"\"\"Data metadata\"\"\" DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\" DATETIME = \"datetime\" TARGET", "= \"Global_intensity\" SUB_METERING_1 = \"Sub_metering_1\" SUB_METERING_2 = \"Sub_metering_2\" SUB_METERING_3 = \"Sub_metering_3\" VOLTAGE =", "DATETIME = \"datetime\" TARGET = \"Global_active_power\" # synthetic series id ID = TsFreshEnum.ID", "ID = \"id\" SORT = \"sort\" class DataMetadata(object): \"\"\"Data metadata\"\"\" DATETIME_FORMAT = \"%Y-%m-%d", "= Path(PROJECT_DIR_POSIX).joinpath(\"reports/figures/predictions_global_active_power.png\") MOCK_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/interim/mock_data.csv\") PIPELINE = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/data_processing_pipelines.pkl\") TEST_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/test_data.csv\") TRAIN_FEATURES =", "TRAIN_FEATURES = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/train_features.csv\") class TsFreshEnum(object): # id passed as column_id/the derived feature due", "metadata\"\"\" DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\" DATETIME = \"datetime\" TARGET = \"Global_active_power\" # synthetic", "= Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/data_processing_pipelines.pkl\") TEST_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/test_data.csv\") TRAIN_FEATURES = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/train_features.csv\") class TsFreshEnum(object): # id passed", "TsFreshEnum.ID GLOBAL_REACTIVE_POWER = \"Global_reactive_power\" GLOBAL_INTENSITY = \"Global_intensity\" SUB_METERING_1 = \"Sub_metering_1\" SUB_METERING_2 = \"Sub_metering_2\"", "from pathlib import Path class FilePathEnum(object): PROJECT_DIR_POSIX = project_dir = Path(__file__).resolve().parents[1] # PosixPath", "coding: utf-8 -*- from pathlib import Path class FilePathEnum(object): PROJECT_DIR_POSIX = 
project_dir =", ") FIGURE = Path(PROJECT_DIR_POSIX).joinpath(\"reports/figures/predictions_global_active_power.png\") MOCK_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/interim/mock_data.csv\") PIPELINE = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/data_processing_pipelines.pkl\") TEST_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/test_data.csv\")", "PIPELINE = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/data_processing_pipelines.pkl\") TEST_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/test_data.csv\") TRAIN_FEATURES = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/train_features.csv\") class TsFreshEnum(object): # id", "\"data/raw/household_power_consumption.zip\" ) FIGURE = Path(PROJECT_DIR_POSIX).joinpath(\"reports/figures/predictions_global_active_power.png\") MOCK_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/interim/mock_data.csv\") PIPELINE = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/data_processing_pipelines.pkl\") TEST_DATA =", "\"Sub_metering_2\" SUB_METERING_3 = \"Sub_metering_3\" VOLTAGE = \"Voltage\" # Column sets NUMERIC_FEATURES = [", "class DataMetadata(object): \"\"\"Data metadata\"\"\" DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\" DATETIME = \"datetime\" TARGET =", "= TsFreshEnum.ID GLOBAL_REACTIVE_POWER = \"Global_reactive_power\" GLOBAL_INTENSITY = \"Global_intensity\" SUB_METERING_1 = \"Sub_metering_1\" SUB_METERING_2 =", "project_dir = Path(__file__).resolve().parents[1] # PosixPath DOWNLOADED_ZIP = Path(PROJECT_DIR_POSIX).joinpath( \"data/raw/household_power_consumption.zip\" ) FIGURE = Path(PROJECT_DIR_POSIX).joinpath(\"reports/figures/predictions_global_active_power.png\")", "MOCK_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/interim/mock_data.csv\") PIPELINE = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/data_processing_pipelines.pkl\") TEST_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/test_data.csv\") TRAIN_FEATURES = 
Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/train_features.csv\") class", "SORT = \"sort\" class DataMetadata(object): \"\"\"Data metadata\"\"\" DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\" DATETIME =", "= \"id\" SORT = \"sort\" class DataMetadata(object): \"\"\"Data metadata\"\"\" DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"", "GLOBAL_REACTIVE_POWER = \"Global_reactive_power\" GLOBAL_INTENSITY = \"Global_intensity\" SUB_METERING_1 = \"Sub_metering_1\" SUB_METERING_2 = \"Sub_metering_2\" SUB_METERING_3", "Path(PROJECT_DIR_POSIX).joinpath( \"data/raw/household_power_consumption.zip\" ) FIGURE = Path(PROJECT_DIR_POSIX).joinpath(\"reports/figures/predictions_global_active_power.png\") MOCK_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/interim/mock_data.csv\") PIPELINE = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/data_processing_pipelines.pkl\") TEST_DATA", "\"Global_intensity\" SUB_METERING_1 = \"Sub_metering_1\" SUB_METERING_2 = \"Sub_metering_2\" SUB_METERING_3 = \"Sub_metering_3\" VOLTAGE = \"Voltage\"", "PROJECT_DIR_POSIX = project_dir = Path(__file__).resolve().parents[1] # PosixPath DOWNLOADED_ZIP = Path(PROJECT_DIR_POSIX).joinpath( \"data/raw/household_power_consumption.zip\" ) FIGURE", "import Path class FilePathEnum(object): PROJECT_DIR_POSIX = project_dir = Path(__file__).resolve().parents[1] # PosixPath DOWNLOADED_ZIP =", "Path(PROJECT_DIR_POSIX).joinpath(\"reports/figures/predictions_global_active_power.png\") MOCK_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/interim/mock_data.csv\") PIPELINE = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/data_processing_pipelines.pkl\") TEST_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/test_data.csv\") TRAIN_FEATURES = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/train_features.csv\")", "Path class FilePathEnum(object): PROJECT_DIR_POSIX = project_dir = Path(__file__).resolve().parents[1] # PosixPath DOWNLOADED_ZIP = Path(PROJECT_DIR_POSIX).joinpath(", "time series ID = \"id\" SORT = \"sort\" 
class DataMetadata(object): \"\"\"Data metadata\"\"\" DATETIME_FORMAT", "class FilePathEnum(object): PROJECT_DIR_POSIX = project_dir = Path(__file__).resolve().parents[1] # PosixPath DOWNLOADED_ZIP = Path(PROJECT_DIR_POSIX).joinpath( \"data/raw/household_power_consumption.zip\"", "-*- coding: utf-8 -*- from pathlib import Path class FilePathEnum(object): PROJECT_DIR_POSIX = project_dir", "id passed as column_id/the derived feature due to rolling of time series ID", "FilePathEnum(object): PROJECT_DIR_POSIX = project_dir = Path(__file__).resolve().parents[1] # PosixPath DOWNLOADED_ZIP = Path(PROJECT_DIR_POSIX).joinpath( \"data/raw/household_power_consumption.zip\" )", "SUB_METERING_1 = \"Sub_metering_1\" SUB_METERING_2 = \"Sub_metering_2\" SUB_METERING_3 = \"Sub_metering_3\" VOLTAGE = \"Voltage\" #", "series ID = \"id\" SORT = \"sort\" class DataMetadata(object): \"\"\"Data metadata\"\"\" DATETIME_FORMAT =", "synthetic series id ID = TsFreshEnum.ID GLOBAL_REACTIVE_POWER = \"Global_reactive_power\" GLOBAL_INTENSITY = \"Global_intensity\" SUB_METERING_1", "= \"Voltage\" # Column sets NUMERIC_FEATURES = [ GLOBAL_REACTIVE_POWER, GLOBAL_INTENSITY, SUB_METERING_1, SUB_METERING_2, SUB_METERING_3,", "\"\"\"Data metadata\"\"\" DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\" DATETIME = \"datetime\" TARGET = \"Global_active_power\" #", "TEST_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/test_data.csv\") TRAIN_FEATURES = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/train_features.csv\") class TsFreshEnum(object): # id passed as column_id/the", "GLOBAL_INTENSITY = \"Global_intensity\" SUB_METERING_1 = \"Sub_metering_1\" SUB_METERING_2 = \"Sub_metering_2\" SUB_METERING_3 = \"Sub_metering_3\" VOLTAGE", "<reponame>JQGoh/multivariate_time_series_pipeline # -*- coding: utf-8 -*- from pathlib import Path class FilePathEnum(object): PROJECT_DIR_POSIX", "Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/data_processing_pipelines.pkl\") TEST_DATA = 
Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/test_data.csv\") TRAIN_FEATURES = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/train_features.csv\") class TsFreshEnum(object): # id passed as", "class TsFreshEnum(object): # id passed as column_id/the derived feature due to rolling of", "FIGURE = Path(PROJECT_DIR_POSIX).joinpath(\"reports/figures/predictions_global_active_power.png\") MOCK_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/interim/mock_data.csv\") PIPELINE = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/data_processing_pipelines.pkl\") TEST_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/test_data.csv\") TRAIN_FEATURES", "DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\" DATETIME = \"datetime\" TARGET = \"Global_active_power\" # synthetic series", "pathlib import Path class FilePathEnum(object): PROJECT_DIR_POSIX = project_dir = Path(__file__).resolve().parents[1] # PosixPath DOWNLOADED_ZIP", "DOWNLOADED_ZIP = Path(PROJECT_DIR_POSIX).joinpath( \"data/raw/household_power_consumption.zip\" ) FIGURE = Path(PROJECT_DIR_POSIX).joinpath(\"reports/figures/predictions_global_active_power.png\") MOCK_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/interim/mock_data.csv\") PIPELINE =", "%H:%M:%S\" DATETIME = \"datetime\" TARGET = \"Global_active_power\" # synthetic series id ID =", "of time series ID = \"id\" SORT = \"sort\" class DataMetadata(object): \"\"\"Data metadata\"\"\"", "Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/test_data.csv\") TRAIN_FEATURES = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/train_features.csv\") class TsFreshEnum(object): # id passed as column_id/the derived feature", "utf-8 -*- from pathlib import Path class FilePathEnum(object): PROJECT_DIR_POSIX = project_dir = Path(__file__).resolve().parents[1]", "derived feature due to rolling of time series ID = \"id\" SORT =", "= \"datetime\" TARGET = \"Global_active_power\" # synthetic series id ID = TsFreshEnum.ID GLOBAL_REACTIVE_POWER", "to rolling of time series ID = 
\"id\" SORT = \"sort\" class DataMetadata(object):", "TARGET = \"Global_active_power\" # synthetic series id ID = TsFreshEnum.ID GLOBAL_REACTIVE_POWER = \"Global_reactive_power\"", "# PosixPath DOWNLOADED_ZIP = Path(PROJECT_DIR_POSIX).joinpath( \"data/raw/household_power_consumption.zip\" ) FIGURE = Path(PROJECT_DIR_POSIX).joinpath(\"reports/figures/predictions_global_active_power.png\") MOCK_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/interim/mock_data.csv\")", "= Path(__file__).resolve().parents[1] # PosixPath DOWNLOADED_ZIP = Path(PROJECT_DIR_POSIX).joinpath( \"data/raw/household_power_consumption.zip\" ) FIGURE = Path(PROJECT_DIR_POSIX).joinpath(\"reports/figures/predictions_global_active_power.png\") MOCK_DATA", "\"Sub_metering_1\" SUB_METERING_2 = \"Sub_metering_2\" SUB_METERING_3 = \"Sub_metering_3\" VOLTAGE = \"Voltage\" # Column sets", "= project_dir = Path(__file__).resolve().parents[1] # PosixPath DOWNLOADED_ZIP = Path(PROJECT_DIR_POSIX).joinpath( \"data/raw/household_power_consumption.zip\" ) FIGURE =", "\"datetime\" TARGET = \"Global_active_power\" # synthetic series id ID = TsFreshEnum.ID GLOBAL_REACTIVE_POWER =", "\"Global_reactive_power\" GLOBAL_INTENSITY = \"Global_intensity\" SUB_METERING_1 = \"Sub_metering_1\" SUB_METERING_2 = \"Sub_metering_2\" SUB_METERING_3 = \"Sub_metering_3\"", "\"id\" SORT = \"sort\" class DataMetadata(object): \"\"\"Data metadata\"\"\" DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\" DATETIME", "= \"Sub_metering_1\" SUB_METERING_2 = \"Sub_metering_2\" SUB_METERING_3 = \"Sub_metering_3\" VOLTAGE = \"Voltage\" # Column", "VOLTAGE = \"Voltage\" # Column sets NUMERIC_FEATURES = [ GLOBAL_REACTIVE_POWER, GLOBAL_INTENSITY, SUB_METERING_1, SUB_METERING_2,", "# Column sets NUMERIC_FEATURES = [ GLOBAL_REACTIVE_POWER, GLOBAL_INTENSITY, SUB_METERING_1, SUB_METERING_2, SUB_METERING_3, VOLTAGE, ]", "= Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/train_features.csv\") class TsFreshEnum(object): # id passed as column_id/the derived feature 
due to", "= \"Global_reactive_power\" GLOBAL_INTENSITY = \"Global_intensity\" SUB_METERING_1 = \"Sub_metering_1\" SUB_METERING_2 = \"Sub_metering_2\" SUB_METERING_3 =", "due to rolling of time series ID = \"id\" SORT = \"sort\" class", "TsFreshEnum(object): # id passed as column_id/the derived feature due to rolling of time", "= \"Sub_metering_2\" SUB_METERING_3 = \"Sub_metering_3\" VOLTAGE = \"Voltage\" # Column sets NUMERIC_FEATURES =", "series id ID = TsFreshEnum.ID GLOBAL_REACTIVE_POWER = \"Global_reactive_power\" GLOBAL_INTENSITY = \"Global_intensity\" SUB_METERING_1 =", "= Path(PROJECT_DIR_POSIX).joinpath(\"data/interim/mock_data.csv\") PIPELINE = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/data_processing_pipelines.pkl\") TEST_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/test_data.csv\") TRAIN_FEATURES = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/train_features.csv\") class TsFreshEnum(object):", "as column_id/the derived feature due to rolling of time series ID = \"id\"", "Path(__file__).resolve().parents[1] # PosixPath DOWNLOADED_ZIP = Path(PROJECT_DIR_POSIX).joinpath( \"data/raw/household_power_consumption.zip\" ) FIGURE = Path(PROJECT_DIR_POSIX).joinpath(\"reports/figures/predictions_global_active_power.png\") MOCK_DATA =", "\"Sub_metering_3\" VOLTAGE = \"Voltage\" # Column sets NUMERIC_FEATURES = [ GLOBAL_REACTIVE_POWER, GLOBAL_INTENSITY, SUB_METERING_1,", "Path(PROJECT_DIR_POSIX).joinpath(\"data/interim/mock_data.csv\") PIPELINE = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/data_processing_pipelines.pkl\") TEST_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/test_data.csv\") TRAIN_FEATURES = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/train_features.csv\") class TsFreshEnum(object): #", "= \"sort\" class DataMetadata(object): \"\"\"Data metadata\"\"\" DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\" DATETIME = \"datetime\"", "rolling of time series ID = \"id\" SORT = \"sort\" class DataMetadata(object): \"\"\"Data", 
"= Path(PROJECT_DIR_POSIX).joinpath( \"data/raw/household_power_consumption.zip\" ) FIGURE = Path(PROJECT_DIR_POSIX).joinpath(\"reports/figures/predictions_global_active_power.png\") MOCK_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/interim/mock_data.csv\") PIPELINE = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/data_processing_pipelines.pkl\")", "# -*- coding: utf-8 -*- from pathlib import Path class FilePathEnum(object): PROJECT_DIR_POSIX =", "column_id/the derived feature due to rolling of time series ID = \"id\" SORT", "Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/train_features.csv\") class TsFreshEnum(object): # id passed as column_id/the derived feature due to rolling", "\"%Y-%m-%d %H:%M:%S\" DATETIME = \"datetime\" TARGET = \"Global_active_power\" # synthetic series id ID", "\"Voltage\" # Column sets NUMERIC_FEATURES = [ GLOBAL_REACTIVE_POWER, GLOBAL_INTENSITY, SUB_METERING_1, SUB_METERING_2, SUB_METERING_3, VOLTAGE,", "= \"Sub_metering_3\" VOLTAGE = \"Voltage\" # Column sets NUMERIC_FEATURES = [ GLOBAL_REACTIVE_POWER, GLOBAL_INTENSITY,", "= \"%Y-%m-%d %H:%M:%S\" DATETIME = \"datetime\" TARGET = \"Global_active_power\" # synthetic series id", "\"Global_active_power\" # synthetic series id ID = TsFreshEnum.ID GLOBAL_REACTIVE_POWER = \"Global_reactive_power\" GLOBAL_INTENSITY =", "-*- from pathlib import Path class FilePathEnum(object): PROJECT_DIR_POSIX = project_dir = Path(__file__).resolve().parents[1] #", "passed as column_id/the derived feature due to rolling of time series ID =", "feature due to rolling of time series ID = \"id\" SORT = \"sort\"", "DataMetadata(object): \"\"\"Data metadata\"\"\" DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\" DATETIME = \"datetime\" TARGET = \"Global_active_power\"", "SUB_METERING_3 = \"Sub_metering_3\" VOLTAGE = \"Voltage\" # Column sets NUMERIC_FEATURES = [ GLOBAL_REACTIVE_POWER,", "PosixPath DOWNLOADED_ZIP = Path(PROJECT_DIR_POSIX).joinpath( \"data/raw/household_power_consumption.zip\" ) FIGURE = 
Path(PROJECT_DIR_POSIX).joinpath(\"reports/figures/predictions_global_active_power.png\") MOCK_DATA = Path(PROJECT_DIR_POSIX).joinpath(\"data/interim/mock_data.csv\") PIPELINE", "# id passed as column_id/the derived feature due to rolling of time series", "# synthetic series id ID = TsFreshEnum.ID GLOBAL_REACTIVE_POWER = \"Global_reactive_power\" GLOBAL_INTENSITY = \"Global_intensity\"", "ID = TsFreshEnum.ID GLOBAL_REACTIVE_POWER = \"Global_reactive_power\" GLOBAL_INTENSITY = \"Global_intensity\" SUB_METERING_1 = \"Sub_metering_1\" SUB_METERING_2", "SUB_METERING_2 = \"Sub_metering_2\" SUB_METERING_3 = \"Sub_metering_3\" VOLTAGE = \"Voltage\" # Column sets NUMERIC_FEATURES", "id ID = TsFreshEnum.ID GLOBAL_REACTIVE_POWER = \"Global_reactive_power\" GLOBAL_INTENSITY = \"Global_intensity\" SUB_METERING_1 = \"Sub_metering_1\"", "= Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/test_data.csv\") TRAIN_FEATURES = Path(PROJECT_DIR_POSIX).joinpath(\"data/processed/train_features.csv\") class TsFreshEnum(object): # id passed as column_id/the derived" ]
[ "i in range(n_walks)] walk_x = [0] * n_steps walk_y = [0] * n_steps", "matplotlib as mpl from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import random", "# Ploting final point # Plot plt.xlabel('x') plt.ylabel('y') #plt.zlabel('z') ax.legend() plt.title('Random walks in", "in range(0, n_steps): # Array of random number rnd = npr.random(3)-0.5 # Norm", "n_steps = 100 # Number of random walks n_walks = 10 walks_x =", "fig = plt.figure() ax = fig.gca(projection='3d') for i in range(0,n_walks): ax.plot(walks_x[i], walks_y[i], walks_z[i],", "0 for j in range(0, n_steps): # Array of random number rnd =", "plt import random import numpy.random as npr import numpy as np # Number", "in range(n_walks)] walk_x = [0] * n_steps walk_y = [0] * n_steps walk_z", "norm = np.linalg.norm(rnd) rnd = rnd / norm x = rnd[0] + x", "= fig.gca(projection='3d') for i in range(0,n_walks): ax.plot(walks_x[i], walks_y[i], walks_z[i], label='Random walk') ax.scatter(walks_x[i][-1], walks_y[i][-1],", "+ x**2; y2ave[j] = y2ave[j] + y**2; z2ave[j] = z2ave[j] + z**2; walk_x[j]", "number rnd = npr.random(3)-0.5 # Norm array norm = np.linalg.norm(rnd) rnd = rnd", "following way: x2ave[j] = x2ave[j] + x**2; y2ave[j] = y2ave[j] + y**2; z2ave[j]", "= 10 walks_x = [[0] * n_steps for i in range(n_walks)] walks_y =", "x = rnd[0] + x y = rnd[1] + y z = rnd[2]", "range(n_walks)] walk_x = [0] * n_steps walk_y = [0] * n_steps walk_z =", "# Plot plt.xlabel('x') plt.ylabel('y') #plt.zlabel('z') ax.legend() plt.title('Random walks in 3D dimension') plt.grid(True) plt.show()", "= np.linalg.norm(rnd) rnd = rnd / norm x = rnd[0] + x y", "[[0] * n_steps for i in range(n_walks)] walks_z = [[0] * n_steps for", "norm x = rnd[0] + x y = rnd[1] + y z =", "+ z**2; walk_x[j] = x walk_y[j] = y walk_z[j] = z walks_x[i] =", "+ z # <x> = 0 so variance can # be calculated in", "= [0.0] * n_steps # Generate random walk for i in range(0, n_walks):", "Axes3D import matplotlib.pyplot as plt import random 
import numpy.random as npr import numpy", "final point # Plot plt.xlabel('x') plt.ylabel('y') #plt.zlabel('z') ax.legend() plt.title('Random walks in 3D dimension')", "walks_z[i][-1], c='b', marker='o') # Ploting final point # Plot plt.xlabel('x') plt.ylabel('y') #plt.zlabel('z') ax.legend()", "= 10 fig = plt.figure() ax = fig.gca(projection='3d') for i in range(0,n_walks): ax.plot(walks_x[i],", "<reponame>szarejkodariusz/3DRandomWalksInPython import matplotlib as mpl from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt", "i in range(n_walks)] walks_z = [[0] * n_steps for i in range(n_walks)] walk_x", "* n_steps # Generate random walk for i in range(0, n_walks): x =", "# <x> = 0 so variance can # be calculated in the following", "for z in walk_z] mpl.rcParams['legend.fontsize'] = 10 fig = plt.figure() ax = fig.gca(projection='3d')", "y walk_z[j] = z walks_x[i] = [x for x in walk_x] walks_y[i] =", "walk') ax.scatter(walks_x[i][-1], walks_y[i][-1], walks_z[i][-1], c='b', marker='o') # Ploting final point # Plot plt.xlabel('x')", "y**2; z2ave[j] = z2ave[j] + z**2; walk_x[j] = x walk_y[j] = y walk_z[j]", "0 so variance can # be calculated in the following way: x2ave[j] =", "mpl.rcParams['legend.fontsize'] = 10 fig = plt.figure() ax = fig.gca(projection='3d') for i in range(0,n_walks):", "mpl from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import random import numpy.random", "= 0 y = 0 z = 0 for j in range(0, n_steps):", "= [0] * n_steps walk_y = [0] * n_steps walk_z = [0] *", "n_steps for i in range(n_walks)] walks_y = [[0] * n_steps for i in", "for i in range(n_walks)] walks_y = [[0] * n_steps for i in range(n_walks)]", "in range(0, n_walks): x = 0 y = 0 z = 0 for", "y = 0 z = 0 for j in range(0, n_steps): # Array", "in range(n_walks)] walks_z = [[0] * n_steps for i in range(n_walks)] walk_x =", "for y in walk_y] walks_z[i] = [z for z in walk_z] mpl.rcParams['legend.fontsize'] =", "<x> = 0 so variance can # be calculated in 
the following way:", "[0] * n_steps x2ave = [0.0] * n_steps y2ave = [0.0] * n_steps", "import random import numpy.random as npr import numpy as np # Number of", "in range(n_walks)] walks_y = [[0] * n_steps for i in range(n_walks)] walks_z =", "= [[0] * n_steps for i in range(n_walks)] walk_x = [0] * n_steps", "the following way: x2ave[j] = x2ave[j] + x**2; y2ave[j] = y2ave[j] + y**2;", "x2ave[j] + x**2; y2ave[j] = y2ave[j] + y**2; z2ave[j] = z2ave[j] + z**2;", "array norm = np.linalg.norm(rnd) rnd = rnd / norm x = rnd[0] +", "walks_x = [[0] * n_steps for i in range(n_walks)] walks_y = [[0] *", "range(0, n_steps): # Array of random number rnd = npr.random(3)-0.5 # Norm array", "walk_x[j] = x walk_y[j] = y walk_z[j] = z walks_x[i] = [x for", "[[0] * n_steps for i in range(n_walks)] walks_y = [[0] * n_steps for", "= y2ave[j] + y**2; z2ave[j] = z2ave[j] + z**2; walk_x[j] = x walk_y[j]", "+ y**2; z2ave[j] = z2ave[j] + z**2; walk_x[j] = x walk_y[j] = y", "walks_y[i][-1], walks_z[i][-1], c='b', marker='o') # Ploting final point # Plot plt.xlabel('x') plt.ylabel('y') #plt.zlabel('z')", "= [0.0] * n_steps y2ave = [0.0] * n_steps z2ave = [0.0] *", "= 0 z = 0 for j in range(0, n_steps): # Array of", "= [0.0] * n_steps z2ave = [0.0] * n_steps r2ave = [0.0] *", "plt.figure() ax = fig.gca(projection='3d') for i in range(0,n_walks): ax.plot(walks_x[i], walks_y[i], walks_z[i], label='Random walk')", "for j in range(0, n_steps): # Array of random number rnd = npr.random(3)-0.5", "i in range(0, n_walks): x = 0 y = 0 z = 0", "# Array of random number rnd = npr.random(3)-0.5 # Norm array norm =", "= 0 for j in range(0, n_steps): # Array of random number rnd", "in walk_z] mpl.rcParams['legend.fontsize'] = 10 fig = plt.figure() ax = fig.gca(projection='3d') for i", "rnd = rnd / norm x = rnd[0] + x y = rnd[1]", "n_walks): x = 0 y = 0 z = 0 for j in", "as npr import numpy as np # Number of steps n_steps = 100", "0 z = 0 for j in range(0, n_steps): # Array of random", "rnd[2] + z # <x> = 0 
so variance can # be calculated", "fig.gca(projection='3d') for i in range(0,n_walks): ax.plot(walks_x[i], walks_y[i], walks_z[i], label='Random walk') ax.scatter(walks_x[i][-1], walks_y[i][-1], walks_z[i][-1],", "[0.0] * n_steps # Generate random walk for i in range(0, n_walks): x", "[z for z in walk_z] mpl.rcParams['legend.fontsize'] = 10 fig = plt.figure() ax =", "Generate random walk for i in range(0, n_walks): x = 0 y =", "can # be calculated in the following way: x2ave[j] = x2ave[j] + x**2;", "n_steps y2ave = [0.0] * n_steps z2ave = [0.0] * n_steps r2ave =", "steps n_steps = 100 # Number of random walks n_walks = 10 walks_x", "i in range(0,n_walks): ax.plot(walks_x[i], walks_y[i], walks_z[i], label='Random walk') ax.scatter(walks_x[i][-1], walks_y[i][-1], walks_z[i][-1], c='b', marker='o')", "y in walk_y] walks_z[i] = [z for z in walk_z] mpl.rcParams['legend.fontsize'] = 10", "walk_y = [0] * n_steps walk_z = [0] * n_steps x2ave = [0.0]", "rnd[0] + x y = rnd[1] + y z = rnd[2] + z", "[0] * n_steps walk_y = [0] * n_steps walk_z = [0] * n_steps", "rnd = npr.random(3)-0.5 # Norm array norm = np.linalg.norm(rnd) rnd = rnd /", "= rnd / norm x = rnd[0] + x y = rnd[1] +", "n_steps for i in range(n_walks)] walk_x = [0] * n_steps walk_y = [0]", "npr import numpy as np # Number of steps n_steps = 100 #", "Number of steps n_steps = 100 # Number of random walks n_walks =", "as mpl from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import random import", "walks n_walks = 10 walks_x = [[0] * n_steps for i in range(n_walks)]", "import numpy.random as npr import numpy as np # Number of steps n_steps", "* n_steps for i in range(n_walks)] walk_x = [0] * n_steps walk_y =", "import numpy as np # Number of steps n_steps = 100 # Number", "i in range(n_walks)] walks_y = [[0] * n_steps for i in range(n_walks)] walks_z", "x y = rnd[1] + y z = rnd[2] + z # <x>", "np.linalg.norm(rnd) rnd = rnd / norm x = rnd[0] + x y =", "= [[0] * n_steps for i in range(n_walks)] 
walks_y = [[0] * n_steps", "z2ave[j] + z**2; walk_x[j] = x walk_y[j] = y walk_z[j] = z walks_x[i]", "10 walks_x = [[0] * n_steps for i in range(n_walks)] walks_y = [[0]", "x2ave = [0.0] * n_steps y2ave = [0.0] * n_steps z2ave = [0.0]", "x = 0 y = 0 z = 0 for j in range(0,", "ax.plot(walks_x[i], walks_y[i], walks_z[i], label='Random walk') ax.scatter(walks_x[i][-1], walks_y[i][-1], walks_z[i][-1], c='b', marker='o') # Ploting final", "numpy as np # Number of steps n_steps = 100 # Number of", "# be calculated in the following way: x2ave[j] = x2ave[j] + x**2; y2ave[j]", "[0.0] * n_steps y2ave = [0.0] * n_steps z2ave = [0.0] * n_steps", "x2ave[j] = x2ave[j] + x**2; y2ave[j] = y2ave[j] + y**2; z2ave[j] = z2ave[j]", "from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import random import numpy.random as", "n_steps for i in range(n_walks)] walks_z = [[0] * n_steps for i in", "matplotlib.pyplot as plt import random import numpy.random as npr import numpy as np", "[0.0] * n_steps r2ave = [0.0] * n_steps # Generate random walk for", "walks_y = [[0] * n_steps for i in range(n_walks)] walks_z = [[0] *", "/ norm x = rnd[0] + x y = rnd[1] + y z", "n_steps walk_z = [0] * n_steps x2ave = [0.0] * n_steps y2ave =", "walk_y] walks_z[i] = [z for z in walk_z] mpl.rcParams['legend.fontsize'] = 10 fig =", "import matplotlib as mpl from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import", "point # Plot plt.xlabel('x') plt.ylabel('y') #plt.zlabel('z') ax.legend() plt.title('Random walks in 3D dimension') plt.grid(True)", "random import numpy.random as npr import numpy as np # Number of steps", "as np # Number of steps n_steps = 100 # Number of random", "numpy.random as npr import numpy as np # Number of steps n_steps =", "+ x y = rnd[1] + y z = rnd[2] + z #", "in range(0,n_walks): ax.plot(walks_x[i], walks_y[i], walks_z[i], label='Random walk') ax.scatter(walks_x[i][-1], walks_y[i][-1], walks_z[i][-1], c='b', marker='o') #", "y = rnd[1] + y z = 
rnd[2] + z # <x> =", "= plt.figure() ax = fig.gca(projection='3d') for i in range(0,n_walks): ax.plot(walks_x[i], walks_y[i], walks_z[i], label='Random", "z = 0 for j in range(0, n_steps): # Array of random number", "be calculated in the following way: x2ave[j] = x2ave[j] + x**2; y2ave[j] =", "range(n_walks)] walks_y = [[0] * n_steps for i in range(n_walks)] walks_z = [[0]", "x in walk_x] walks_y[i] = [y for y in walk_y] walks_z[i] = [z", "n_steps x2ave = [0.0] * n_steps y2ave = [0.0] * n_steps z2ave =", "= rnd[0] + x y = rnd[1] + y z = rnd[2] +", "x**2; y2ave[j] = y2ave[j] + y**2; z2ave[j] = z2ave[j] + z**2; walk_x[j] =", "= x2ave[j] + x**2; y2ave[j] = y2ave[j] + y**2; z2ave[j] = z2ave[j] +", "walks_x[i] = [x for x in walk_x] walks_y[i] = [y for y in", "Ploting final point # Plot plt.xlabel('x') plt.ylabel('y') #plt.zlabel('z') ax.legend() plt.title('Random walks in 3D", "z2ave[j] = z2ave[j] + z**2; walk_x[j] = x walk_y[j] = y walk_z[j] =", "= [y for y in walk_y] walks_z[i] = [z for z in walk_z]", "= [[0] * n_steps for i in range(n_walks)] walks_z = [[0] * n_steps", "= [0] * n_steps x2ave = [0.0] * n_steps y2ave = [0.0] *", "* n_steps for i in range(n_walks)] walks_y = [[0] * n_steps for i", "in the following way: x2ave[j] = x2ave[j] + x**2; y2ave[j] = y2ave[j] +", "mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import random import numpy.random as npr", "y z = rnd[2] + z # <x> = 0 so variance can", "in walk_x] walks_y[i] = [y for y in walk_y] walks_z[i] = [z for", "ax.scatter(walks_x[i][-1], walks_y[i][-1], walks_z[i][-1], c='b', marker='o') # Ploting final point # Plot plt.xlabel('x') plt.ylabel('y')", "= [0] * n_steps walk_z = [0] * n_steps x2ave = [0.0] *", "z**2; walk_x[j] = x walk_y[j] = y walk_z[j] = z walks_x[i] = [x", "of random walks n_walks = 10 walks_x = [[0] * n_steps for i", "label='Random walk') ax.scatter(walks_x[i][-1], walks_y[i][-1], walks_z[i][-1], c='b', marker='o') # Ploting final point # Plot", "walks_y[i], walks_z[i], 
label='Random walk') ax.scatter(walks_x[i][-1], walks_y[i][-1], walks_z[i][-1], c='b', marker='o') # Ploting final point", "z # <x> = 0 so variance can # be calculated in the", "walks_z[i], label='Random walk') ax.scatter(walks_x[i][-1], walks_y[i][-1], walks_z[i][-1], c='b', marker='o') # Ploting final point #", "0 y = 0 z = 0 for j in range(0, n_steps): #", "walk_x] walks_y[i] = [y for y in walk_y] walks_z[i] = [z for z", "random number rnd = npr.random(3)-0.5 # Norm array norm = np.linalg.norm(rnd) rnd =", "walks_z[i] = [z for z in walk_z] mpl.rcParams['legend.fontsize'] = 10 fig = plt.figure()", "y2ave = [0.0] * n_steps z2ave = [0.0] * n_steps r2ave = [0.0]", "# Norm array norm = np.linalg.norm(rnd) rnd = rnd / norm x =", "c='b', marker='o') # Ploting final point # Plot plt.xlabel('x') plt.ylabel('y') #plt.zlabel('z') ax.legend() plt.title('Random", "walk_z[j] = z walks_x[i] = [x for x in walk_x] walks_y[i] = [y", "= npr.random(3)-0.5 # Norm array norm = np.linalg.norm(rnd) rnd = rnd / norm", "# Number of random walks n_walks = 10 walks_x = [[0] * n_steps", "* n_steps z2ave = [0.0] * n_steps r2ave = [0.0] * n_steps #", "rnd[1] + y z = rnd[2] + z # <x> = 0 so", "way: x2ave[j] = x2ave[j] + x**2; y2ave[j] = y2ave[j] + y**2; z2ave[j] =", "[0] * n_steps walk_z = [0] * n_steps x2ave = [0.0] * n_steps", "walk for i in range(0, n_walks): x = 0 y = 0 z", "[0.0] * n_steps z2ave = [0.0] * n_steps r2ave = [0.0] * n_steps", "npr.random(3)-0.5 # Norm array norm = np.linalg.norm(rnd) rnd = rnd / norm x", "range(0,n_walks): ax.plot(walks_x[i], walks_y[i], walks_z[i], label='Random walk') ax.scatter(walks_x[i][-1], walks_y[i][-1], walks_z[i][-1], c='b', marker='o') # Ploting", "of steps n_steps = 100 # Number of random walks n_walks = 10", "walk_z] mpl.rcParams['legend.fontsize'] = 10 fig = plt.figure() ax = fig.gca(projection='3d') for i in", "Norm array norm = np.linalg.norm(rnd) rnd = rnd / norm x = rnd[0]", "as plt import random import numpy.random as npr import numpy as np 
#", "y2ave[j] + y**2; z2ave[j] = z2ave[j] + z**2; walk_x[j] = x walk_y[j] =", "walk_x = [0] * n_steps walk_y = [0] * n_steps walk_z = [0]", "n_steps z2ave = [0.0] * n_steps r2ave = [0.0] * n_steps # Generate", "Array of random number rnd = npr.random(3)-0.5 # Norm array norm = np.linalg.norm(rnd)", "+ y z = rnd[2] + z # <x> = 0 so variance", "n_steps walk_y = [0] * n_steps walk_z = [0] * n_steps x2ave =", "ax = fig.gca(projection='3d') for i in range(0,n_walks): ax.plot(walks_x[i], walks_y[i], walks_z[i], label='Random walk') ax.scatter(walks_x[i][-1],", "z walks_x[i] = [x for x in walk_x] walks_y[i] = [y for y", "x walk_y[j] = y walk_z[j] = z walks_x[i] = [x for x in", "range(n_walks)] walks_z = [[0] * n_steps for i in range(n_walks)] walk_x = [0]", "for i in range(0, n_walks): x = 0 y = 0 z =", "# Generate random walk for i in range(0, n_walks): x = 0 y", "* n_steps r2ave = [0.0] * n_steps # Generate random walk for i", "# Number of steps n_steps = 100 # Number of random walks n_walks", "walk_y[j] = y walk_z[j] = z walks_x[i] = [x for x in walk_x]", "= [0.0] * n_steps r2ave = [0.0] * n_steps # Generate random walk", "so variance can # be calculated in the following way: x2ave[j] = x2ave[j]", "n_steps r2ave = [0.0] * n_steps # Generate random walk for i in", "* n_steps x2ave = [0.0] * n_steps y2ave = [0.0] * n_steps z2ave", "= rnd[2] + z # <x> = 0 so variance can # be", "z in walk_z] mpl.rcParams['legend.fontsize'] = 10 fig = plt.figure() ax = fig.gca(projection='3d') for", "variance can # be calculated in the following way: x2ave[j] = x2ave[j] +", "= rnd[1] + y z = rnd[2] + z # <x> = 0", "= [x for x in walk_x] walks_y[i] = [y for y in walk_y]", "random walk for i in range(0, n_walks): x = 0 y = 0", "calculated in the following way: x2ave[j] = x2ave[j] + x**2; y2ave[j] = y2ave[j]", "= [z for z in walk_z] mpl.rcParams['legend.fontsize'] = 10 fig = plt.figure() ax", "walk_z = [0] * n_steps x2ave = [0.0] * n_steps y2ave = [0.0]", "* n_steps for i in range(n_walks)] 
walks_z = [[0] * n_steps for i", "Number of random walks n_walks = 10 walks_x = [[0] * n_steps for", "random walks n_walks = 10 walks_x = [[0] * n_steps for i in", "r2ave = [0.0] * n_steps # Generate random walk for i in range(0,", "n_steps # Generate random walk for i in range(0, n_walks): x = 0", "= z2ave[j] + z**2; walk_x[j] = x walk_y[j] = y walk_z[j] = z", "of random number rnd = npr.random(3)-0.5 # Norm array norm = np.linalg.norm(rnd) rnd", "= y walk_z[j] = z walks_x[i] = [x for x in walk_x] walks_y[i]", "* n_steps y2ave = [0.0] * n_steps z2ave = [0.0] * n_steps r2ave", "* n_steps walk_y = [0] * n_steps walk_z = [0] * n_steps x2ave", "rnd / norm x = rnd[0] + x y = rnd[1] + y", "import matplotlib.pyplot as plt import random import numpy.random as npr import numpy as", "j in range(0, n_steps): # Array of random number rnd = npr.random(3)-0.5 #", "= 0 so variance can # be calculated in the following way: x2ave[j]", "for x in walk_x] walks_y[i] = [y for y in walk_y] walks_z[i] =", "[[0] * n_steps for i in range(n_walks)] walk_x = [0] * n_steps walk_y", "* n_steps walk_z = [0] * n_steps x2ave = [0.0] * n_steps y2ave", "[y for y in walk_y] walks_z[i] = [z for z in walk_z] mpl.rcParams['legend.fontsize']", "for i in range(0,n_walks): ax.plot(walks_x[i], walks_y[i], walks_z[i], label='Random walk') ax.scatter(walks_x[i][-1], walks_y[i][-1], walks_z[i][-1], c='b',", "n_walks = 10 walks_x = [[0] * n_steps for i in range(n_walks)] walks_y", "y2ave[j] = y2ave[j] + y**2; z2ave[j] = z2ave[j] + z**2; walk_x[j] = x", "= x walk_y[j] = y walk_z[j] = z walks_x[i] = [x for x", "[x for x in walk_x] walks_y[i] = [y for y in walk_y] walks_z[i]", "10 fig = plt.figure() ax = fig.gca(projection='3d') for i in range(0,n_walks): ax.plot(walks_x[i], walks_y[i],", "100 # Number of random walks n_walks = 10 walks_x = [[0] *", "for i in range(n_walks)] walks_z = [[0] * n_steps for i in range(n_walks)]", "np # Number of steps n_steps = 100 # Number of random walks", "z2ave = [0.0] * n_steps 
r2ave = [0.0] * n_steps # Generate random", "= z walks_x[i] = [x for x in walk_x] walks_y[i] = [y for", "walks_y[i] = [y for y in walk_y] walks_z[i] = [z for z in", "marker='o') # Ploting final point # Plot plt.xlabel('x') plt.ylabel('y') #plt.zlabel('z') ax.legend() plt.title('Random walks", "= 100 # Number of random walks n_walks = 10 walks_x = [[0]", "n_steps): # Array of random number rnd = npr.random(3)-0.5 # Norm array norm", "z = rnd[2] + z # <x> = 0 so variance can #", "walks_z = [[0] * n_steps for i in range(n_walks)] walk_x = [0] *", "import Axes3D import matplotlib.pyplot as plt import random import numpy.random as npr import", "in walk_y] walks_z[i] = [z for z in walk_z] mpl.rcParams['legend.fontsize'] = 10 fig", "for i in range(n_walks)] walk_x = [0] * n_steps walk_y = [0] *", "range(0, n_walks): x = 0 y = 0 z = 0 for j" ]
[ "\"..\", \"inputs-and-results\", \"optimizations\", \"*\", \"result\", \"optimize\", \"force-field.offxml\", ) ) for force_field_path in tqdm(force_field_paths,", "TransferFreeEnergySchema( system=System( solutes={solute: 1}, solvent_a=None, solvent_b={solvent: 1000} ), state=State( temperature=298.15 * unit.kelvin, pressure=1.0", "1.00, 1.00, 1.00, 1.00, 1.00, 0.95, 0.90, 0.80, 0.70, 0.60, 0.50, 0.40, 0.35,", "in data_sets for entry in data_set.entries): solute = [ component.smiles for component in", "root_path = os.path.join( \"..\", \"inputs-and-results\", \"benchmarks\", \"transfer-free-energies\", root_name, ) os.makedirs(os.path.join(root_path, \"schemas\")) force_field =", "lambda_electrostatics=[1.0, 0.75, 0.5, 0.25, 0.0], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), alchemical_protocol_b=EquilibriumProtocol( lambda_sterics=[", "<reponame>jthorton/double-exp-vdw import os.path from glob import glob from absolv.models import ( EquilibriumProtocol, SimulationProtocol,", "force_field = ForceField( force_field_path, load_plugins=True, allow_cosmetic_attributes=True ) force_field.to_file( os.path.join(root_path, \"force-field.offxml\"), discard_cosmetic_attributes=True, ) for", "[] for entry in (entry for data_set in data_sets for entry in data_set.entries):", "DataSet from openff.toolkit.typing.engines.smirnoff import ForceField from openmm import unit from tqdm import tqdm", ") ), ] schemas = [] for entry in (entry for data_set in", "entry.components if component.role == \"Solute\" ][0] solvent = [ component.smiles for component in", "], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), ) schemas.append(schema) force_field_paths = glob( os.path.join(", "temperature=298.15 * unit.kelvin, pressure=1.0 * unit.atmosphere ), alchemical_protocol_a=EquilibriumProtocol( lambda_sterics=[1.0, 1.0, 1.0, 1.0, 1.0],", 
"alchemical_protocol_b=EquilibriumProtocol( lambda_sterics=[ 1.00, 1.00, 1.00, 1.00, 1.00, 0.95, 0.90, 0.80, 0.70, 0.60, 0.50,", "import os.path from glob import glob from absolv.models import ( EquilibriumProtocol, SimulationProtocol, State,", "\"inputs-and-results\", \"benchmarks\", \"transfer-free-energies\", root_name, ) os.makedirs(os.path.join(root_path, \"schemas\")) force_field = ForceField( force_field_path, load_plugins=True, allow_cosmetic_attributes=True", "unit.atmosphere ), alchemical_protocol_a=EquilibriumProtocol( lambda_sterics=[1.0, 1.0, 1.0, 1.0, 1.0], lambda_electrostatics=[1.0, 0.75, 0.5, 0.25, 0.0],", "0.75, 0.50, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,", "SimulationProtocol, State, System, TransferFreeEnergySchema, ) from nonbonded.library.models.datasets import DataSet from openff.toolkit.typing.engines.smirnoff import ForceField", "component.smiles for component in entry.components if component.role == \"Solute\" ][0] solvent = [", "import glob from absolv.models import ( EquilibriumProtocol, SimulationProtocol, State, System, TransferFreeEnergySchema, ) from", "0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,", "import unit from tqdm import tqdm def main(): data_sets = [ DataSet.parse_file( os.path.join(", "solvent_a=None, solvent_b={solvent: 1000} ), state=State( temperature=298.15 * unit.kelvin, pressure=1.0 * unit.atmosphere ), alchemical_protocol_a=EquilibriumProtocol(", "solute = [ component.smiles for component in entry.components if component.role == \"Solute\" ][0]", "== \"Solute\" ][0] solvent = [ component.smiles for component in entry.components if component.role", "allow_cosmetic_attributes=True ) force_field.to_file( os.path.join(root_path, \"force-field.offxml\"), discard_cosmetic_attributes=True, ) for i, schema in enumerate(schemas): with", "schema = TransferFreeEnergySchema( system=System( solutes={solute: 1}, solvent_a=None, solvent_b={solvent: 1000} ), state=State( 
temperature=298.15 *", "0.80, 0.70, 0.60, 0.50, 0.40, 0.35, 0.30, 0.25, 0.20, 0.15, 0.10, 0.05, 0.00,", "1.00, 0.75, 0.50, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,", "os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-fsolv-test-v1.json\", ) ), DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\",", "(entry for data_set in data_sets for entry in data_set.entries): solute = [ component.smiles", "state=State( temperature=298.15 * unit.kelvin, pressure=1.0 * unit.atmosphere ), alchemical_protocol_a=EquilibriumProtocol( lambda_sterics=[1.0, 1.0, 1.0, 1.0,", "= [ component.smiles for component in entry.components if component.role == \"Solvent\" ][0] schema", "if component.role == \"Solvent\" ][0] schema = TransferFreeEnergySchema( system=System( solutes={solute: 1}, solvent_a=None, solvent_b={solvent:", "* unit.atmosphere ), alchemical_protocol_a=EquilibriumProtocol( lambda_sterics=[1.0, 1.0, 1.0, 1.0, 1.0], lambda_electrostatics=[1.0, 0.75, 0.5, 0.25,", "1.0, 1.0], lambda_electrostatics=[1.0, 0.75, 0.5, 0.25, 0.0], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ),", "with open(os.path.join(root_path, \"schemas\", f\"{i + 1}.json\"), \"w\") as file: file.write(schema.json(indent=2)) if __name__ ==", "pressure=1.0 * unit.atmosphere ), alchemical_protocol_a=EquilibriumProtocol( lambda_sterics=[1.0, 1.0, 1.0, 1.0, 1.0], lambda_electrostatics=[1.0, 0.75, 0.5,", "] schemas = [] for entry in (entry for data_set in data_sets for", "), ), alchemical_protocol_b=EquilibriumProtocol( lambda_sterics=[ 1.00, 1.00, 1.00, 1.00, 1.00, 0.95, 0.90, 0.80, 0.70,", "from absolv.models import ( EquilibriumProtocol, SimulationProtocol, State, System, TransferFreeEnergySchema, ) from nonbonded.library.models.datasets import", "from nonbonded.library.models.datasets import DataSet from openff.toolkit.typing.engines.smirnoff import ForceField 
from openmm import unit from", "absolv.models import ( EquilibriumProtocol, SimulationProtocol, State, System, TransferFreeEnergySchema, ) from nonbonded.library.models.datasets import DataSet", "0.25, 0.20, 0.15, 0.10, 0.05, 0.00, ], lambda_electrostatics=[ 1.00, 0.75, 0.50, 0.25, 0.00,", ") from nonbonded.library.models.datasets import DataSet from openff.toolkit.typing.engines.smirnoff import ForceField from openmm import unit", "in tqdm(force_field_paths, desc=\"force field\"): root_name = force_field_path.split(os.sep)[-4] root_path = os.path.join( \"..\", \"inputs-and-results\", \"benchmarks\",", "i, schema in enumerate(schemas): with open(os.path.join(root_path, \"schemas\", f\"{i + 1}.json\"), \"w\") as file:", "openmm import unit from tqdm import tqdm def main(): data_sets = [ DataSet.parse_file(", "for entry in data_set.entries): solute = [ component.smiles for component in entry.components if", "\"Solute\" ][0] solvent = [ component.smiles for component in entry.components if component.role ==", "0.5, 0.25, 0.0], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), alchemical_protocol_b=EquilibriumProtocol( lambda_sterics=[ 1.00, 1.00,", "os.path.join( \"..\", \"inputs-and-results\", \"optimizations\", \"*\", \"result\", \"optimize\", \"force-field.offxml\", ) ) for force_field_path in", ") for i, schema in enumerate(schemas): with open(os.path.join(root_path, \"schemas\", f\"{i + 1}.json\"), \"w\")", ") os.makedirs(os.path.join(root_path, \"schemas\")) force_field = ForceField( force_field_path, load_plugins=True, allow_cosmetic_attributes=True ) force_field.to_file( os.path.join(root_path, \"force-field.offxml\"),", "os.makedirs(os.path.join(root_path, \"schemas\")) force_field = ForceField( force_field_path, load_plugins=True, allow_cosmetic_attributes=True ) force_field.to_file( os.path.join(root_path, \"force-field.offxml\"), discard_cosmetic_attributes=True,", "0.05, 0.00, ], 
lambda_electrostatics=[ 1.00, 0.75, 0.50, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00,", "load_plugins=True, allow_cosmetic_attributes=True ) force_field.to_file( os.path.join(root_path, \"force-field.offxml\"), discard_cosmetic_attributes=True, ) for i, schema in enumerate(schemas):", "root_name, ) os.makedirs(os.path.join(root_path, \"schemas\")) force_field = ForceField( force_field_path, load_plugins=True, allow_cosmetic_attributes=True ) force_field.to_file( os.path.join(root_path,", "\"sage-mnsol-test-v1.json\", ) ), ] schemas = [] for entry in (entry for data_set", "force_field_path.split(os.sep)[-4] root_path = os.path.join( \"..\", \"inputs-and-results\", \"benchmarks\", \"transfer-free-energies\", root_name, ) os.makedirs(os.path.join(root_path, \"schemas\")) force_field", "component in entry.components if component.role == \"Solute\" ][0] solvent = [ component.smiles for", "1.0, 1.0, 1.0], lambda_electrostatics=[1.0, 0.75, 0.5, 0.25, 0.0], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ),", "alchemical_protocol_a=EquilibriumProtocol( lambda_sterics=[1.0, 1.0, 1.0, 1.0, 1.0], lambda_electrostatics=[1.0, 0.75, 0.5, 0.25, 0.0], sampler=\"repex\", production_protocol=SimulationProtocol(", "0.15, 0.10, 0.05, 0.00, ], lambda_electrostatics=[ 1.00, 0.75, 0.50, 0.25, 0.00, 0.00, 0.00,", "1000} ), state=State( temperature=298.15 * unit.kelvin, pressure=1.0 * unit.atmosphere ), alchemical_protocol_a=EquilibriumProtocol( lambda_sterics=[1.0, 1.0,", "root_name = force_field_path.split(os.sep)[-4] root_path = os.path.join( \"..\", \"inputs-and-results\", \"benchmarks\", \"transfer-free-energies\", root_name, ) os.makedirs(os.path.join(root_path,", "force_field.to_file( os.path.join(root_path, \"force-field.offxml\"), discard_cosmetic_attributes=True, ) for i, schema in enumerate(schemas): with open(os.path.join(root_path, \"schemas\",", "][0] schema = TransferFreeEnergySchema( system=System( solutes={solute: 1}, 
solvent_a=None, solvent_b={solvent: 1000} ), state=State( temperature=298.15", "\"inputs-and-results\", \"optimizations\", \"*\", \"result\", \"optimize\", \"force-field.offxml\", ) ) for force_field_path in tqdm(force_field_paths, desc=\"force", "System, TransferFreeEnergySchema, ) from nonbonded.library.models.datasets import DataSet from openff.toolkit.typing.engines.smirnoff import ForceField from openmm", "solutes={solute: 1}, solvent_a=None, solvent_b={solvent: 1000} ), state=State( temperature=298.15 * unit.kelvin, pressure=1.0 * unit.atmosphere", "\"physical-property\", \"benchmarks\", \"sage-mnsol-test-v1.json\", ) ), ] schemas = [] for entry in (entry", "entry.components if component.role == \"Solvent\" ][0] schema = TransferFreeEnergySchema( system=System( solutes={solute: 1}, solvent_a=None,", "data_sets = [ DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-fsolv-test-v1.json\", ) ), DataSet.parse_file(", "\"benchmarks\", \"sage-fsolv-test-v1.json\", ) ), DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-mnsol-test-v1.json\", ) ),", "\"schemas\", f\"{i + 1}.json\"), \"w\") as file: file.write(schema.json(indent=2)) if __name__ == \"__main__\": main()", "in data_set.entries): solute = [ component.smiles for component in entry.components if component.role ==", "1.00, 1.00, 1.00, 0.95, 0.90, 0.80, 0.70, 0.60, 0.50, 0.40, 0.35, 0.30, 0.25,", "\"force-field.offxml\"), discard_cosmetic_attributes=True, ) for i, schema in enumerate(schemas): with open(os.path.join(root_path, \"schemas\", f\"{i +", "0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, ], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500,", "= glob( os.path.join( \"..\", \"inputs-and-results\", \"optimizations\", \"*\", \"result\", \"optimize\", \"force-field.offxml\", ) ) for", "from tqdm import tqdm def main(): data_sets = [ 
DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\",", "\"..\", \"inputs-and-results\", \"benchmarks\", \"transfer-free-energies\", root_name, ) os.makedirs(os.path.join(root_path, \"schemas\")) force_field = ForceField( force_field_path, load_plugins=True,", "1.0], lambda_electrostatics=[1.0, 0.75, 0.5, 0.25, 0.0], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), alchemical_protocol_b=EquilibriumProtocol(", "component.smiles for component in entry.components if component.role == \"Solvent\" ][0] schema = TransferFreeEnergySchema(", "0.90, 0.80, 0.70, 0.60, 0.50, 0.40, 0.35, 0.30, 0.25, 0.20, 0.15, 0.10, 0.05,", "data_set in data_sets for entry in data_set.entries): solute = [ component.smiles for component", "EquilibriumProtocol, SimulationProtocol, State, System, TransferFreeEnergySchema, ) from nonbonded.library.models.datasets import DataSet from openff.toolkit.typing.engines.smirnoff import", "for i, schema in enumerate(schemas): with open(os.path.join(root_path, \"schemas\", f\"{i + 1}.json\"), \"w\") as", "0.0], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), alchemical_protocol_b=EquilibriumProtocol( lambda_sterics=[ 1.00, 1.00, 1.00, 1.00,", "in enumerate(schemas): with open(os.path.join(root_path, \"schemas\", f\"{i + 1}.json\"), \"w\") as file: file.write(schema.json(indent=2)) if", "solvent_b={solvent: 1000} ), state=State( temperature=298.15 * unit.kelvin, pressure=1.0 * unit.atmosphere ), alchemical_protocol_a=EquilibriumProtocol( lambda_sterics=[1.0,", "\"schemas\")) force_field = ForceField( force_field_path, load_plugins=True, allow_cosmetic_attributes=True ) force_field.to_file( os.path.join(root_path, \"force-field.offxml\"), discard_cosmetic_attributes=True, )", "1.0, 1.0, 1.0, 1.0], lambda_electrostatics=[1.0, 0.75, 0.5, 0.25, 0.0], sampler=\"repex\", production_protocol=SimulationProtocol( 
n_steps_per_iteration=500, n_iterations=2000", "0.00, ], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), ) schemas.append(schema) force_field_paths = glob(", "from glob import glob from absolv.models import ( EquilibriumProtocol, SimulationProtocol, State, System, TransferFreeEnergySchema,", "for entry in (entry for data_set in data_sets for entry in data_set.entries): solute", "import ForceField from openmm import unit from tqdm import tqdm def main(): data_sets", "0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, ],", "force_field_path in tqdm(force_field_paths, desc=\"force field\"): root_name = force_field_path.split(os.sep)[-4] root_path = os.path.join( \"..\", \"inputs-and-results\",", "\"force-field.offxml\", ) ) for force_field_path in tqdm(force_field_paths, desc=\"force field\"): root_name = force_field_path.split(os.sep)[-4] root_path", "0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, ], sampler=\"repex\",", "0.00, 0.00, 0.00, 0.00, 0.00, ], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), )", "in (entry for data_set in data_sets for entry in data_set.entries): solute = [", "[ component.smiles for component in entry.components if component.role == \"Solvent\" ][0] schema =", "\"*\", \"result\", \"optimize\", \"force-field.offxml\", ) ) for force_field_path in tqdm(force_field_paths, desc=\"force field\"): root_name", "0.35, 0.30, 0.25, 0.20, 0.15, 0.10, 0.05, 0.00, ], lambda_electrostatics=[ 1.00, 0.75, 0.50,", "component.role == \"Solute\" ][0] solvent = [ component.smiles for component in entry.components if", "sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), alchemical_protocol_b=EquilibriumProtocol( lambda_sterics=[ 1.00, 1.00, 1.00, 1.00, 1.00,", "0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, ], sampler=\"repex\", 
production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ),", "os.path.join( \"..\", \"inputs-and-results\", \"benchmarks\", \"transfer-free-energies\", root_name, ) os.makedirs(os.path.join(root_path, \"schemas\")) force_field = ForceField( force_field_path,", "lambda_sterics=[1.0, 1.0, 1.0, 1.0, 1.0], lambda_electrostatics=[1.0, 0.75, 0.5, 0.25, 0.0], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500,", "ForceField( force_field_path, load_plugins=True, allow_cosmetic_attributes=True ) force_field.to_file( os.path.join(root_path, \"force-field.offxml\"), discard_cosmetic_attributes=True, ) for i, schema", "= os.path.join( \"..\", \"inputs-and-results\", \"benchmarks\", \"transfer-free-energies\", root_name, ) os.makedirs(os.path.join(root_path, \"schemas\")) force_field = ForceField(", "force_field_path, load_plugins=True, allow_cosmetic_attributes=True ) force_field.to_file( os.path.join(root_path, \"force-field.offxml\"), discard_cosmetic_attributes=True, ) for i, schema in", "nonbonded.library.models.datasets import DataSet from openff.toolkit.typing.engines.smirnoff import ForceField from openmm import unit from tqdm", "][0] solvent = [ component.smiles for component in entry.components if component.role == \"Solvent\"", "( EquilibriumProtocol, SimulationProtocol, State, System, TransferFreeEnergySchema, ) from nonbonded.library.models.datasets import DataSet from openff.toolkit.typing.engines.smirnoff", "\"benchmarks\", \"sage-mnsol-test-v1.json\", ) ), ] schemas = [] for entry in (entry for", "os.path from glob import glob from absolv.models import ( EquilibriumProtocol, SimulationProtocol, State, System,", "], lambda_electrostatics=[ 1.00, 0.75, 0.50, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,", "force_field_paths = glob( os.path.join( \"..\", \"inputs-and-results\", \"optimizations\", \"*\", \"result\", \"optimize\", \"force-field.offxml\", ) )", "import ( EquilibriumProtocol, 
SimulationProtocol, State, System, TransferFreeEnergySchema, ) from nonbonded.library.models.datasets import DataSet from", "DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-mnsol-test-v1.json\", ) ), ] schemas = []", "* unit.kelvin, pressure=1.0 * unit.atmosphere ), alchemical_protocol_a=EquilibriumProtocol( lambda_sterics=[1.0, 1.0, 1.0, 1.0, 1.0], lambda_electrostatics=[1.0,", "main(): data_sets = [ DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-fsolv-test-v1.json\", ) ),", "\"transfer-free-energies\", root_name, ) os.makedirs(os.path.join(root_path, \"schemas\")) force_field = ForceField( force_field_path, load_plugins=True, allow_cosmetic_attributes=True ) force_field.to_file(", "openff.toolkit.typing.engines.smirnoff import ForceField from openmm import unit from tqdm import tqdm def main():", "tqdm import tqdm def main(): data_sets = [ DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\",", "), DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-mnsol-test-v1.json\", ) ), ] schemas =", "component.role == \"Solvent\" ][0] schema = TransferFreeEnergySchema( system=System( solutes={solute: 1}, solvent_a=None, solvent_b={solvent: 1000}", "n_iterations=2000 ), ), ) schemas.append(schema) force_field_paths = glob( os.path.join( \"..\", \"inputs-and-results\", \"optimizations\", \"*\",", "0.00, 0.00, 0.00, ], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), ) schemas.append(schema) force_field_paths", "0.40, 0.35, 0.30, 0.25, 0.20, 0.15, 0.10, 0.05, 0.00, ], lambda_electrostatics=[ 1.00, 0.75,", "system=System( solutes={solute: 1}, solvent_a=None, solvent_b={solvent: 1000} ), state=State( temperature=298.15 * unit.kelvin, pressure=1.0 *", "), ), ) schemas.append(schema) force_field_paths = 
glob( os.path.join( \"..\", \"inputs-and-results\", \"optimizations\", \"*\", \"result\",", "for force_field_path in tqdm(force_field_paths, desc=\"force field\"): root_name = force_field_path.split(os.sep)[-4] root_path = os.path.join( \"..\",", "), ) schemas.append(schema) force_field_paths = glob( os.path.join( \"..\", \"inputs-and-results\", \"optimizations\", \"*\", \"result\", \"optimize\",", "State, System, TransferFreeEnergySchema, ) from nonbonded.library.models.datasets import DataSet from openff.toolkit.typing.engines.smirnoff import ForceField from", "for data_set in data_sets for entry in data_set.entries): solute = [ component.smiles for", "production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), ) schemas.append(schema) force_field_paths = glob( os.path.join( \"..\", \"inputs-and-results\",", "TransferFreeEnergySchema, ) from nonbonded.library.models.datasets import DataSet from openff.toolkit.typing.engines.smirnoff import ForceField from openmm import", "\"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-fsolv-test-v1.json\", ) ), DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\",", "1}, solvent_a=None, solvent_b={solvent: 1000} ), state=State( temperature=298.15 * unit.kelvin, pressure=1.0 * unit.atmosphere ),", "0.00, 0.00, ], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), ) schemas.append(schema) force_field_paths =", "tqdm def main(): data_sets = [ DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-fsolv-test-v1.json\",", "1.00, 1.00, 1.00, 1.00, 0.95, 0.90, 0.80, 0.70, 0.60, 0.50, 0.40, 0.35, 0.30,", ") schemas.append(schema) force_field_paths = glob( os.path.join( \"..\", \"inputs-and-results\", \"optimizations\", \"*\", \"result\", \"optimize\", \"force-field.offxml\",", "schemas = [] for entry in (entry for 
data_set in data_sets for entry", "in entry.components if component.role == \"Solute\" ][0] solvent = [ component.smiles for component", "0.00, 0.00, 0.00, 0.00, 0.00, 0.00, ], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ),", ") ) for force_field_path in tqdm(force_field_paths, desc=\"force field\"): root_name = force_field_path.split(os.sep)[-4] root_path =", ") for force_field_path in tqdm(force_field_paths, desc=\"force field\"): root_name = force_field_path.split(os.sep)[-4] root_path = os.path.join(", ") ), DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-mnsol-test-v1.json\", ) ), ] schemas", "0.50, 0.40, 0.35, 0.30, 0.25, 0.20, 0.15, 0.10, 0.05, 0.00, ], lambda_electrostatics=[ 1.00,", "\"optimizations\", \"*\", \"result\", \"optimize\", \"force-field.offxml\", ) ) for force_field_path in tqdm(force_field_paths, desc=\"force field\"):", "= ForceField( force_field_path, load_plugins=True, allow_cosmetic_attributes=True ) force_field.to_file( os.path.join(root_path, \"force-field.offxml\"), discard_cosmetic_attributes=True, ) for i,", "open(os.path.join(root_path, \"schemas\", f\"{i + 1}.json\"), \"w\") as file: file.write(schema.json(indent=2)) if __name__ == \"__main__\":", "from openmm import unit from tqdm import tqdm def main(): data_sets = [", "schema in enumerate(schemas): with open(os.path.join(root_path, \"schemas\", f\"{i + 1}.json\"), \"w\") as file: file.write(schema.json(indent=2))", "[ DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-fsolv-test-v1.json\", ) ), DataSet.parse_file( os.path.join( \"..\",", "glob import glob from absolv.models import ( EquilibriumProtocol, SimulationProtocol, State, System, TransferFreeEnergySchema, )", "unit from tqdm import tqdm def main(): data_sets = [ DataSet.parse_file( os.path.join( \"..\",", "desc=\"force field\"): root_name 
= force_field_path.split(os.sep)[-4] root_path = os.path.join( \"..\", \"inputs-and-results\", \"benchmarks\", \"transfer-free-energies\", root_name,", "from openff.toolkit.typing.engines.smirnoff import ForceField from openmm import unit from tqdm import tqdm def", "lambda_electrostatics=[ 1.00, 0.75, 0.50, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,", "for component in entry.components if component.role == \"Solvent\" ][0] schema = TransferFreeEnergySchema( system=System(", "data_sets for entry in data_set.entries): solute = [ component.smiles for component in entry.components", "import DataSet from openff.toolkit.typing.engines.smirnoff import ForceField from openmm import unit from tqdm import", "\"optimize\", \"force-field.offxml\", ) ) for force_field_path in tqdm(force_field_paths, desc=\"force field\"): root_name = force_field_path.split(os.sep)[-4]", "DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-fsolv-test-v1.json\", ) ), DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\",", "schemas.append(schema) force_field_paths = glob( os.path.join( \"..\", \"inputs-and-results\", \"optimizations\", \"*\", \"result\", \"optimize\", \"force-field.offxml\", )", "= [ DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-fsolv-test-v1.json\", ) ), DataSet.parse_file( os.path.join(", "), state=State( temperature=298.15 * unit.kelvin, pressure=1.0 * unit.atmosphere ), alchemical_protocol_a=EquilibriumProtocol( lambda_sterics=[1.0, 1.0, 1.0,", "1.00, 0.95, 0.90, 0.80, 0.70, 0.60, 0.50, 0.40, 0.35, 0.30, 0.25, 0.20, 0.15,", "data_set.entries): solute = [ component.smiles for component in entry.components if component.role == \"Solute\"", "0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, ], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000", "sampler=\"repex\", 
production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), ) schemas.append(schema) force_field_paths = glob( os.path.join( \"..\",", "glob( os.path.join( \"..\", \"inputs-and-results\", \"optimizations\", \"*\", \"result\", \"optimize\", \"force-field.offxml\", ) ) for force_field_path", "solvent = [ component.smiles for component in entry.components if component.role == \"Solvent\" ][0]", "\"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-mnsol-test-v1.json\", ) ), ] schemas = [] for entry", "tqdm(force_field_paths, desc=\"force field\"): root_name = force_field_path.split(os.sep)[-4] root_path = os.path.join( \"..\", \"inputs-and-results\", \"benchmarks\", \"transfer-free-energies\",", "for component in entry.components if component.role == \"Solute\" ][0] solvent = [ component.smiles", "0.75, 0.5, 0.25, 0.0], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), alchemical_protocol_b=EquilibriumProtocol( lambda_sterics=[ 1.00,", "0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,", "field\"): root_name = force_field_path.split(os.sep)[-4] root_path = os.path.join( \"..\", \"inputs-and-results\", \"benchmarks\", \"transfer-free-energies\", root_name, )", "enumerate(schemas): with open(os.path.join(root_path, \"schemas\", f\"{i + 1}.json\"), \"w\") as file: file.write(schema.json(indent=2)) if __name__", "\"benchmarks\", \"transfer-free-energies\", root_name, ) os.makedirs(os.path.join(root_path, \"schemas\")) force_field = ForceField( force_field_path, load_plugins=True, allow_cosmetic_attributes=True )", "os.path.join(root_path, \"force-field.offxml\"), discard_cosmetic_attributes=True, ) for i, schema in enumerate(schemas): with open(os.path.join(root_path, \"schemas\", f\"{i", "0.25, 0.0], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), 
alchemical_protocol_b=EquilibriumProtocol( lambda_sterics=[ 1.00, 1.00, 1.00,", "glob from absolv.models import ( EquilibriumProtocol, SimulationProtocol, State, System, TransferFreeEnergySchema, ) from nonbonded.library.models.datasets", "\"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-fsolv-test-v1.json\", ) ), DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-mnsol-test-v1.json\",", "if component.role == \"Solute\" ][0] solvent = [ component.smiles for component in entry.components", "), alchemical_protocol_b=EquilibriumProtocol( lambda_sterics=[ 1.00, 1.00, 1.00, 1.00, 1.00, 0.95, 0.90, 0.80, 0.70, 0.60,", "component in entry.components if component.role == \"Solvent\" ][0] schema = TransferFreeEnergySchema( system=System( solutes={solute:", "import tqdm def main(): data_sets = [ DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\",", "0.50, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,", "= force_field_path.split(os.sep)[-4] root_path = os.path.join( \"..\", \"inputs-and-results\", \"benchmarks\", \"transfer-free-energies\", root_name, ) os.makedirs(os.path.join(root_path, \"schemas\"))", "entry in data_set.entries): solute = [ component.smiles for component in entry.components if component.role", "entry in (entry for data_set in data_sets for entry in data_set.entries): solute =", "\"result\", \"optimize\", \"force-field.offxml\", ) ) for force_field_path in tqdm(force_field_paths, desc=\"force field\"): root_name =", "= [] for entry in (entry for data_set in data_sets for entry in", "0.10, 0.05, 0.00, ], lambda_electrostatics=[ 1.00, 0.75, 0.50, 0.25, 0.00, 0.00, 0.00, 0.00,", "def main(): data_sets = [ DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-fsolv-test-v1.json\", )", "0.95, 0.90, 0.80, 0.70, 0.60, 0.50, 0.40, 0.35, 0.30, 0.25, 0.20, 
0.15, 0.10,", "\"sage-fsolv-test-v1.json\", ) ), DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-mnsol-test-v1.json\", ) ), ]", "= TransferFreeEnergySchema( system=System( solutes={solute: 1}, solvent_a=None, solvent_b={solvent: 1000} ), state=State( temperature=298.15 * unit.kelvin,", "\"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-mnsol-test-v1.json\", ) ), ] schemas = [] for entry in", "n_iterations=2000 ), ), alchemical_protocol_b=EquilibriumProtocol( lambda_sterics=[ 1.00, 1.00, 1.00, 1.00, 1.00, 0.95, 0.90, 0.80,", "0.00, ], lambda_electrostatics=[ 1.00, 0.75, 0.50, 0.25, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,", "unit.kelvin, pressure=1.0 * unit.atmosphere ), alchemical_protocol_a=EquilibriumProtocol( lambda_sterics=[1.0, 1.0, 1.0, 1.0, 1.0], lambda_electrostatics=[1.0, 0.75,", ") force_field.to_file( os.path.join(root_path, \"force-field.offxml\"), discard_cosmetic_attributes=True, ) for i, schema in enumerate(schemas): with open(os.path.join(root_path,", "\"physical-property\", \"benchmarks\", \"sage-fsolv-test-v1.json\", ) ), DataSet.parse_file( os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-mnsol-test-v1.json\", )", "production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), alchemical_protocol_b=EquilibriumProtocol( lambda_sterics=[ 1.00, 1.00, 1.00, 1.00, 1.00, 0.95,", "), ] schemas = [] for entry in (entry for data_set in data_sets", "= [ component.smiles for component in entry.components if component.role == \"Solute\" ][0] solvent", "[ component.smiles for component in entry.components if component.role == \"Solute\" ][0] solvent =", "os.path.join( \"..\", \"data-set-curation\", \"physical-property\", \"benchmarks\", \"sage-mnsol-test-v1.json\", ) ), ] schemas = [] for", "== \"Solvent\" ][0] schema = TransferFreeEnergySchema( system=System( solutes={solute: 1}, solvent_a=None, 
solvent_b={solvent: 1000} ),", "in entry.components if component.role == \"Solvent\" ][0] schema = TransferFreeEnergySchema( system=System( solutes={solute: 1},", "n_steps_per_iteration=500, n_iterations=2000 ), ), alchemical_protocol_b=EquilibriumProtocol( lambda_sterics=[ 1.00, 1.00, 1.00, 1.00, 1.00, 0.95, 0.90,", "0.70, 0.60, 0.50, 0.40, 0.35, 0.30, 0.25, 0.20, 0.15, 0.10, 0.05, 0.00, ],", "0.20, 0.15, 0.10, 0.05, 0.00, ], lambda_electrostatics=[ 1.00, 0.75, 0.50, 0.25, 0.00, 0.00,", "0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, ], sampler=\"repex\", production_protocol=SimulationProtocol(", "0.60, 0.50, 0.40, 0.35, 0.30, 0.25, 0.20, 0.15, 0.10, 0.05, 0.00, ], lambda_electrostatics=[", "ForceField from openmm import unit from tqdm import tqdm def main(): data_sets =", "\"Solvent\" ][0] schema = TransferFreeEnergySchema( system=System( solutes={solute: 1}, solvent_a=None, solvent_b={solvent: 1000} ), state=State(", "), alchemical_protocol_a=EquilibriumProtocol( lambda_sterics=[1.0, 1.0, 1.0, 1.0, 1.0], lambda_electrostatics=[1.0, 0.75, 0.5, 0.25, 0.0], sampler=\"repex\",", "lambda_sterics=[ 1.00, 1.00, 1.00, 1.00, 1.00, 0.95, 0.90, 0.80, 0.70, 0.60, 0.50, 0.40,", "n_steps_per_iteration=500, n_iterations=2000 ), ), ) schemas.append(schema) force_field_paths = glob( os.path.join( \"..\", \"inputs-and-results\", \"optimizations\",", "discard_cosmetic_attributes=True, ) for i, schema in enumerate(schemas): with open(os.path.join(root_path, \"schemas\", f\"{i + 1}.json\"),", "1.00, 1.00, 0.95, 0.90, 0.80, 0.70, 0.60, 0.50, 0.40, 0.35, 0.30, 0.25, 0.20,", "0.30, 0.25, 0.20, 0.15, 0.10, 0.05, 0.00, ], lambda_electrostatics=[ 1.00, 0.75, 0.50, 0.25,", "0.00, 0.00, 0.00, 0.00, ], sampler=\"repex\", production_protocol=SimulationProtocol( n_steps_per_iteration=500, n_iterations=2000 ), ), ) schemas.append(schema)" ]
[ "-1 self.send_time = -1 self.cache_send_time = -1 self.cache_get_back_time = -1 self.final_time = -1", "-1 self.picked_time = -1 self.processed_time = -1 self.send_time = -1 self.cache_send_time = -1", "= -1 self.picked_time = -1 self.processed_time = -1 self.send_time = -1 self.cache_send_time =", "self.final_time = -1 self.delta_time = -1 self.values = {} self.checked_cache = False self.cache_result", "-1 self.values = {} self.checked_cache = False self.cache_result = None self.sql_result = None", "Stamp: def __init__(self): self.enqueue_time = -1 self.picked_time = -1 self.processed_time = -1 self.send_time", "self.enqueue_time = -1 self.picked_time = -1 self.processed_time = -1 self.send_time = -1 self.cache_send_time", "-1 self.processed_time = -1 self.send_time = -1 self.cache_send_time = -1 self.cache_get_back_time = -1", "= False self.cache_result = None self.sql_result = None def visited(self): return self.picked_time !=", "-1 self.cache_send_time = -1 self.cache_get_back_time = -1 self.final_time = -1 self.delta_time = -1", "= -1 self.cache_send_time = -1 self.cache_get_back_time = -1 self.final_time = -1 self.delta_time =", "{} self.checked_cache = False self.cache_result = None self.sql_result = None def visited(self): return", "self.cache_send_time = -1 self.cache_get_back_time = -1 self.final_time = -1 self.delta_time = -1 self.values", "<filename>app/simulator/stamp.py<gh_stars>0 class Stamp: def __init__(self): self.enqueue_time = -1 self.picked_time = -1 self.processed_time =", "self.send_time = -1 self.cache_send_time = -1 self.cache_get_back_time = -1 self.final_time = -1 self.delta_time", "= -1 self.final_time = -1 self.delta_time = -1 self.values = {} self.checked_cache =", "= -1 self.processed_time = -1 self.send_time = -1 self.cache_send_time = -1 self.cache_get_back_time =", "-1 self.final_time = -1 self.delta_time = -1 self.values = {} self.checked_cache = False", "self.delta_time = -1 self.values = {} self.checked_cache = False 
self.cache_result = None self.sql_result", "self.picked_time = -1 self.processed_time = -1 self.send_time = -1 self.cache_send_time = -1 self.cache_get_back_time", "= -1 self.cache_get_back_time = -1 self.final_time = -1 self.delta_time = -1 self.values =", "self.checked_cache = False self.cache_result = None self.sql_result = None def visited(self): return self.picked_time", "self.processed_time = -1 self.send_time = -1 self.cache_send_time = -1 self.cache_get_back_time = -1 self.final_time", "= {} self.checked_cache = False self.cache_result = None self.sql_result = None def visited(self):", "-1 self.delta_time = -1 self.values = {} self.checked_cache = False self.cache_result = None", "= -1 self.send_time = -1 self.cache_send_time = -1 self.cache_get_back_time = -1 self.final_time =", "class Stamp: def __init__(self): self.enqueue_time = -1 self.picked_time = -1 self.processed_time = -1", "False self.cache_result = None self.sql_result = None def visited(self): return self.picked_time != -1", "self.cache_get_back_time = -1 self.final_time = -1 self.delta_time = -1 self.values = {} self.checked_cache", "self.values = {} self.checked_cache = False self.cache_result = None self.sql_result = None def", "= -1 self.values = {} self.checked_cache = False self.cache_result = None self.sql_result =", "= -1 self.delta_time = -1 self.values = {} self.checked_cache = False self.cache_result =", "-1 self.cache_get_back_time = -1 self.final_time = -1 self.delta_time = -1 self.values = {}", "def __init__(self): self.enqueue_time = -1 self.picked_time = -1 self.processed_time = -1 self.send_time =", "__init__(self): self.enqueue_time = -1 self.picked_time = -1 self.processed_time = -1 self.send_time = -1" ]
[ "# by using distinct lookups for admin/non-admin applications (which we should do regardless", "class CountryRegionLookup(LookupChannel): model = CountryRegion def get_query(self, q, request): return CountryRegion.objects.filter( Q(name__icontains=q) |", "can_add(self, user, source_model): # avoid in-line addition of users by accident return False", "django.contrib.auth import get_user_model from django.core.exceptions import PermissionDenied from django.db.models import Q from django.urls", "and not request.user.has_perm('tracker.can_edit_locked_events'): params['locked'] = False return filters.run_model_query(model, params, request.user) def get_result(self, obj):", "you will need to install/enable the 'ajax_select' django module, and also add an", "return str(obj) def format_match(self, obj): return escape(str(obj)) # returning the admin URL reduces", "obj): return obj.username def format_match(self, obj): return escape(obj.username) def can_add(self, user, source_model): #", "class DonorLookup(GenericLookup): model = Donor class PrizeLookup(GenericLookup): model = Prize class RunLookup(GenericLookup): model", "'all'} class BidTargetLookup(GenericLookup): model = Bid modelName = 'bidtarget' useLock = True extra_params", "escape(str(obj)), ) return mark_safe(result) class BidLookup(GenericLookup): useLock = True model = Bid modelName", "table (the table of all lookups used by this application are in tracker/ajax_lookup_channels.py)", "return False class CountryRegionLookup(LookupChannel): model = CountryRegion def get_query(self, q, request): return CountryRegion.objects.filter(", "False extra_params = {} def get_extra_params(self, request): return self.extra_params def get_query(self, q, request):", "we should do regardless since # non-admin search should be different) def format_item_display(self,", "extra_params = {'feed': 'all'} class AllBidLookup(GenericLookup): useLock = True model = Bid modelName", "def get_result(self, obj): return obj.username def 
format_match(self, obj): return escape(obj.username) def can_add(self, user,", "SpeedRun, ) \"\"\" In order to use these lookups properly with the admin,", "obj.username def format_match(self, obj): return escape(obj.username) def can_add(self, user, source_model): # avoid in-line", "True model = Bid modelName = 'bid' extra_params = {'feed': 'all'} class AllBidLookup(GenericLookup):", "not request.user.has_perm('tracker.can_edit_locked_events'): params['locked'] = False return filters.run_model_query(model, params, request.user) def get_result(self, obj): return", "'bid' extra_params = {'feed': 'all'} class AllBidLookup(GenericLookup): useLock = True model = Bid", "import get_user_model from django.core.exceptions import PermissionDenied from django.db.models import Q from django.urls import", "to install/enable the 'ajax_select' django module, and also add an AJAX_LOOKUP_CHANNELS table (the", "'admin:tracker_{0}_change'.format(obj._meta.model_name), args=[obj.pk] ), escape(str(obj)), ) return mark_safe(result) class BidLookup(GenericLookup): useLock = True model", "= True model = Bid modelName = 'allbids' extra_params = {'feed': 'all'} class", "mark_safe(result) class BidLookup(GenericLookup): useLock = True model = Bid modelName = 'bid' extra_params", "import escape from django.utils.safestring import mark_safe import tracker.search_filters as filters from tracker.models import", "table of all lookups used by this application are in tracker/ajax_lookup_channels.py) They can", "from django.db.models import Q from django.urls import reverse from django.utils.html import escape from", "{'q': q} params.update(self.get_extra_params(request)) model = getattr(self, 'modelName', self.model) if self.useLock and not request.user.has_perm('tracker.can_edit_locked_events'):", "escape(str(obj)) class GenericLookup(LookupChannel): useLock = False extra_params = {} def get_extra_params(self, request): return", "class CountryLookup(LookupChannel): model = Country def 
get_query(self, q, request): return Country.objects.filter(name__icontains=q) def get_result(self,", "not request.user.has_perm('tracker.can_search'): raise PermissionDenied return self.model.objects.filter(username__icontains=q) def get_result(self, obj): return obj.username def format_match(self,", "class BidTargetLookup(GenericLookup): model = Bid modelName = 'bidtarget' useLock = True extra_params =", "( Bid, Country, CountryRegion, Donation, Donor, Event, Prize, Runner, SpeedRun, ) \"\"\" In", "| Q(country__name__icontains=q) ) def get_result(self, obj): return str(obj) def format_match(self, obj): return escape(str(obj))", "Bid modelName = 'bid' extra_params = {'feed': 'all'} class AllBidLookup(GenericLookup): useLock = True", "Q from django.urls import reverse from django.utils.html import escape from django.utils.safestring import mark_safe", "reverse from django.utils.html import escape from django.utils.safestring import mark_safe import tracker.search_filters as filters", "countries typically return False class CountryRegionLookup(LookupChannel): model = CountryRegion def get_query(self, q, request):", "reduces the genericity of our solution a little bit, but this can be", "= get_user_model() super(UserLookup, self).__init__(*args, **kwargs) def get_query(self, q, request): if not request.user.has_perm('tracker.can_search'): raise", "True extra_params = {'feed': 'all'} class DonationLookup(GenericLookup): model = Donation useLock = True", "regardless since # non-admin search should be different) def format_item_display(self, obj): result =", "= Bid modelName = 'bid' extra_params = {'feed': 'all'} class AllBidLookup(GenericLookup): useLock =", "obj): return str(obj) def format_match(self, obj): return escape(str(obj)) class GenericLookup(LookupChannel): useLock = False", "def format_item_display(self, obj): result = '<a href=\"{0}\">{1}</a>'.format( reverse( 'admin:tracker_{0}_change'.format(obj._meta.model_name), args=[obj.pk] ), escape(str(obj)), )", 
"return Country.objects.filter(name__icontains=q) def get_result(self, obj): return str(obj) def format_match(self, obj): return escape(str(obj)) def", "useLock = True model = Bid modelName = 'allbids' extra_params = {'feed': 'all'}", "to use these lookups properly with the admin, you will need to install/enable", "__init__(self, *args, **kwargs): self.model = get_user_model() super(UserLookup, self).__init__(*args, **kwargs) def get_query(self, q, request):", "self.extra_params def get_query(self, q, request): params = {'q': q} params.update(self.get_extra_params(request)) model = getattr(self,", "{'feed': 'all'} class AllBidLookup(GenericLookup): useLock = True model = Bid modelName = 'allbids'", "escape(str(obj)) # returning the admin URL reduces the genericity of our solution a", "request): if not request.user.has_perm('tracker.can_search'): raise PermissionDenied return self.model.objects.filter(username__icontains=q) def get_result(self, obj): return obj.username", "return escape(obj.username) def can_add(self, user, source_model): # avoid in-line addition of users by", "= {'feed': 'all'} class BidTargetLookup(GenericLookup): model = Bid modelName = 'bidtarget' useLock =", "Bid modelName = 'allbids' extra_params = {'feed': 'all'} class BidTargetLookup(GenericLookup): model = Bid", "params['locked'] = False return filters.run_model_query(model, params, request.user) def get_result(self, obj): return str(obj) def", "format_item_display(self, obj): result = '<a href=\"{0}\">{1}</a>'.format( reverse( 'admin:tracker_{0}_change'.format(obj._meta.model_name), args=[obj.pk] ), escape(str(obj)), ) return", "URL reduces the genericity of our solution a little bit, but this can", "should be different) def format_item_display(self, obj): result = '<a href=\"{0}\">{1}</a>'.format( reverse( 'admin:tracker_{0}_change'.format(obj._meta.model_name), args=[obj.pk]", "class PrizeLookup(GenericLookup): model = Prize class RunLookup(GenericLookup): model = SpeedRun useLock = 
True", "= Donor class PrizeLookup(GenericLookup): model = Prize class RunLookup(GenericLookup): model = SpeedRun useLock", "of all lookups used by this application are in tracker/ajax_lookup_channels.py) They can be", "using distinct lookups for admin/non-admin applications (which we should do regardless since #", "return escape(str(obj)) class GenericLookup(LookupChannel): useLock = False extra_params = {} def get_extra_params(self, request):", "useLock = True class DonorLookup(GenericLookup): model = Donor class PrizeLookup(GenericLookup): model = Prize", "this application are in tracker/ajax_lookup_channels.py) They can be imported with the line: from", "Country def get_query(self, q, request): return Country.objects.filter(name__icontains=q) def get_result(self, obj): return str(obj) def", "return str(obj) def format_match(self, obj): return escape(str(obj)) class GenericLookup(LookupChannel): useLock = False extra_params", "lookups properly with the admin, you will need to install/enable the 'ajax_select' django", "def get_query(self, q, request): if not request.user.has_perm('tracker.can_search'): raise PermissionDenied return self.model.objects.filter(username__icontains=q) def get_result(self,", "def can_add(self, user, source_model): # Presumably, we don't want to add countries typically", "SpeedRun useLock = True class EventLookup(GenericLookup): model = Event useLock = True class", "the 'ajax_select' django module, and also add an AJAX_LOOKUP_CHANNELS table (the table of", "the admin URL reduces the genericity of our solution a little bit, but", "q, request): if not request.user.has_perm('tracker.can_search'): raise PermissionDenied return self.model.objects.filter(username__icontains=q) def get_result(self, obj): return", "<filename>tracker/lookups.py from ajax_select import LookupChannel from django.contrib.auth import get_user_model from django.core.exceptions import PermissionDenied", "from ajax_select import LookupChannel from django.contrib.auth 
import get_user_model from django.core.exceptions import PermissionDenied from", "= Prize class RunLookup(GenericLookup): model = SpeedRun useLock = True class EventLookup(GenericLookup): model", "def get_query(self, q, request): return CountryRegion.objects.filter( Q(name__icontains=q) | Q(country__name__icontains=q) ) def get_result(self, obj):", "super(UserLookup, self).__init__(*args, **kwargs) def get_query(self, q, request): if not request.user.has_perm('tracker.can_search'): raise PermissionDenied return", "we don't want to add countries typically return False class CountryRegionLookup(LookupChannel): model =", "= Bid modelName = 'bidtarget' useLock = True extra_params = {'feed': 'all'} class", "a little bit, but this can be solved # by using distinct lookups", "params.update(self.get_extra_params(request)) model = getattr(self, 'modelName', self.model) if self.useLock and not request.user.has_perm('tracker.can_edit_locked_events'): params['locked'] =", "class GenericLookup(LookupChannel): useLock = False extra_params = {} def get_extra_params(self, request): return self.extra_params", "import AJAX_LOOKUP_CHANNELS \"\"\" class UserLookup(LookupChannel): def __init__(self, *args, **kwargs): self.model = get_user_model() super(UserLookup,", "self.model) if self.useLock and not request.user.has_perm('tracker.can_edit_locked_events'): params['locked'] = False return filters.run_model_query(model, params, request.user)", "useLock = True extra_params = {'feed': 'all'} class DonationLookup(GenericLookup): model = Donation useLock", "return mark_safe(result) class BidLookup(GenericLookup): useLock = True model = Bid modelName = 'bid'", "def format_match(self, obj): return escape(str(obj)) def can_add(self, user, source_model): # Presumably, we don't", "request): return CountryRegion.objects.filter( Q(name__icontains=q) | Q(country__name__icontains=q) ) def get_result(self, obj): return str(obj) def", "be imported with the line: from tracker.ajax_lookup_channels 
import AJAX_LOOKUP_CHANNELS \"\"\" class UserLookup(LookupChannel): def", "CountryRegionLookup(LookupChannel): model = CountryRegion def get_query(self, q, request): return CountryRegion.objects.filter( Q(name__icontains=q) | Q(country__name__icontains=q)", "from django.core.exceptions import PermissionDenied from django.db.models import Q from django.urls import reverse from", "return escape(str(obj)) def can_add(self, user, source_model): # Presumably, we don't want to add", "get_result(self, obj): return str(obj) def format_match(self, obj): return escape(str(obj)) # returning the admin", "get_extra_params(self, request): return self.extra_params def get_query(self, q, request): params = {'q': q} params.update(self.get_extra_params(request))", "# avoid in-line addition of users by accident return False class CountryLookup(LookupChannel): model", "lookups for admin/non-admin applications (which we should do regardless since # non-admin search", "an AJAX_LOOKUP_CHANNELS table (the table of all lookups used by this application are", "model = Donor class PrizeLookup(GenericLookup): model = Prize class RunLookup(GenericLookup): model = SpeedRun", "all lookups used by this application are in tracker/ajax_lookup_channels.py) They can be imported", "tracker.ajax_lookup_channels import AJAX_LOOKUP_CHANNELS \"\"\" class UserLookup(LookupChannel): def __init__(self, *args, **kwargs): self.model = get_user_model()", "add an AJAX_LOOKUP_CHANNELS table (the table of all lookups used by this application", "Q(name__icontains=q) | Q(country__name__icontains=q) ) def get_result(self, obj): return str(obj) def format_match(self, obj): return", "import mark_safe import tracker.search_filters as filters from tracker.models import ( Bid, Country, CountryRegion,", "Donor, Event, Prize, Runner, SpeedRun, ) \"\"\" In order to use these lookups", "avoid in-line addition of users by accident return False class CountryLookup(LookupChannel): model =", "to add countries typically return 
False class CountryRegionLookup(LookupChannel): model = CountryRegion def get_query(self,", "little bit, but this can be solved # by using distinct lookups for", "Donor class PrizeLookup(GenericLookup): model = Prize class RunLookup(GenericLookup): model = SpeedRun useLock =", "= SpeedRun useLock = True class EventLookup(GenericLookup): model = Event useLock = True", "Bid, Country, CountryRegion, Donation, Donor, Event, Prize, Runner, SpeedRun, ) \"\"\" In order", "by this application are in tracker/ajax_lookup_channels.py) They can be imported with the line:", "obj): result = '<a href=\"{0}\">{1}</a>'.format( reverse( 'admin:tracker_{0}_change'.format(obj._meta.model_name), args=[obj.pk] ), escape(str(obj)), ) return mark_safe(result)", "= getattr(self, 'modelName', self.model) if self.useLock and not request.user.has_perm('tracker.can_edit_locked_events'): params['locked'] = False return", "by accident return False class CountryLookup(LookupChannel): model = Country def get_query(self, q, request):", "False class CountryLookup(LookupChannel): model = Country def get_query(self, q, request): return Country.objects.filter(name__icontains=q) def", "CountryRegion, Donation, Donor, Event, Prize, Runner, SpeedRun, ) \"\"\" In order to use", "since # non-admin search should be different) def format_item_display(self, obj): result = '<a", "module, and also add an AJAX_LOOKUP_CHANNELS table (the table of all lookups used", "should do regardless since # non-admin search should be different) def format_item_display(self, obj):", "'all'} class AllBidLookup(GenericLookup): useLock = True model = Bid modelName = 'allbids' extra_params", "get_query(self, q, request): return CountryRegion.objects.filter( Q(name__icontains=q) | Q(country__name__icontains=q) ) def get_result(self, obj): return", "import PermissionDenied from django.db.models import Q from django.urls import reverse from django.utils.html import", "from tracker.ajax_lookup_channels import AJAX_LOOKUP_CHANNELS 
\"\"\" class UserLookup(LookupChannel): def __init__(self, *args, **kwargs): self.model =", "'ajax_select' django module, and also add an AJAX_LOOKUP_CHANNELS table (the table of all", "application are in tracker/ajax_lookup_channels.py) They can be imported with the line: from tracker.ajax_lookup_channels", "return obj.username def format_match(self, obj): return escape(obj.username) def can_add(self, user, source_model): # avoid", "q, request): return CountryRegion.objects.filter( Q(name__icontains=q) | Q(country__name__icontains=q) ) def get_result(self, obj): return str(obj)", "request.user) def get_result(self, obj): return str(obj) def format_match(self, obj): return escape(str(obj)) # returning", "{'feed': 'all'} class BidTargetLookup(GenericLookup): model = Bid modelName = 'bidtarget' useLock = True", "as filters from tracker.models import ( Bid, Country, CountryRegion, Donation, Donor, Event, Prize,", "Donation, Donor, Event, Prize, Runner, SpeedRun, ) \"\"\" In order to use these", "genericity of our solution a little bit, but this can be solved #", "def get_extra_params(self, request): return self.extra_params def get_query(self, q, request): params = {'q': q}", "also add an AJAX_LOOKUP_CHANNELS table (the table of all lookups used by this", "UserLookup(LookupChannel): def __init__(self, *args, **kwargs): self.model = get_user_model() super(UserLookup, self).__init__(*args, **kwargs) def get_query(self,", "AJAX_LOOKUP_CHANNELS table (the table of all lookups used by this application are in", "Runner, SpeedRun, ) \"\"\" In order to use these lookups properly with the", "don't want to add countries typically return False class CountryRegionLookup(LookupChannel): model = CountryRegion", "= False extra_params = {} def get_extra_params(self, request): return self.extra_params def get_query(self, q,", "obj): return escape(str(obj)) class GenericLookup(LookupChannel): useLock = False extra_params = {} def get_extra_params(self,", "get_result(self, obj): return 
obj.username def format_match(self, obj): return escape(obj.username) def can_add(self, user, source_model):", "filters from tracker.models import ( Bid, Country, CountryRegion, Donation, Donor, Event, Prize, Runner,", "class AllBidLookup(GenericLookup): useLock = True model = Bid modelName = 'allbids' extra_params =", "from django.utils.html import escape from django.utils.safestring import mark_safe import tracker.search_filters as filters from", "get_query(self, q, request): return Country.objects.filter(name__icontains=q) def get_result(self, obj): return str(obj) def format_match(self, obj):", "are in tracker/ajax_lookup_channels.py) They can be imported with the line: from tracker.ajax_lookup_channels import", "= True model = Bid modelName = 'bid' extra_params = {'feed': 'all'} class", "model = Prize class RunLookup(GenericLookup): model = SpeedRun useLock = True class EventLookup(GenericLookup):", "class UserLookup(LookupChannel): def __init__(self, *args, **kwargs): self.model = get_user_model() super(UserLookup, self).__init__(*args, **kwargs) def", "get_result(self, obj): return str(obj) def format_match(self, obj): return escape(str(obj)) class GenericLookup(LookupChannel): useLock =", "CountryLookup(LookupChannel): model = Country def get_query(self, q, request): return Country.objects.filter(name__icontains=q) def get_result(self, obj):", "from django.urls import reverse from django.utils.html import escape from django.utils.safestring import mark_safe import", "model = Bid modelName = 'bid' extra_params = {'feed': 'all'} class AllBidLookup(GenericLookup): useLock", "Prize, Runner, SpeedRun, ) \"\"\" In order to use these lookups properly with", ") def get_result(self, obj): return str(obj) def format_match(self, obj): return escape(str(obj)) class GenericLookup(LookupChannel):", "in tracker/ajax_lookup_channels.py) They can be imported with the line: from tracker.ajax_lookup_channels import AJAX_LOOKUP_CHANNELS", "add countries typically return False 
class CountryRegionLookup(LookupChannel): model = CountryRegion def get_query(self, q,", "of users by accident return False class CountryLookup(LookupChannel): model = Country def get_query(self,", "They can be imported with the line: from tracker.ajax_lookup_channels import AJAX_LOOKUP_CHANNELS \"\"\" class", "but this can be solved # by using distinct lookups for admin/non-admin applications", "def get_query(self, q, request): params = {'q': q} params.update(self.get_extra_params(request)) model = getattr(self, 'modelName',", "class DonationLookup(GenericLookup): model = Donation useLock = True class DonorLookup(GenericLookup): model = Donor", "= 'bidtarget' useLock = True extra_params = {'feed': 'all'} class DonationLookup(GenericLookup): model =", "= {'feed': 'all'} class DonationLookup(GenericLookup): model = Donation useLock = True class DonorLookup(GenericLookup):", "admin, you will need to install/enable the 'ajax_select' django module, and also add", "RunLookup(GenericLookup): model = SpeedRun useLock = True class EventLookup(GenericLookup): model = Event useLock", "Donation useLock = True class DonorLookup(GenericLookup): model = Donor class PrizeLookup(GenericLookup): model =", "will need to install/enable the 'ajax_select' django module, and also add an AJAX_LOOKUP_CHANNELS", "format_match(self, obj): return escape(str(obj)) class GenericLookup(LookupChannel): useLock = False extra_params = {} def", "model = getattr(self, 'modelName', self.model) if self.useLock and not request.user.has_perm('tracker.can_edit_locked_events'): params['locked'] = False", "order to use these lookups properly with the admin, you will need to", "'<a href=\"{0}\">{1}</a>'.format( reverse( 'admin:tracker_{0}_change'.format(obj._meta.model_name), args=[obj.pk] ), escape(str(obj)), ) return mark_safe(result) class BidLookup(GenericLookup): useLock", "escape from django.utils.safestring import mark_safe import tracker.search_filters as filters from tracker.models import (", 
"extra_params = {'feed': 'all'} class DonationLookup(GenericLookup): model = Donation useLock = True class", "q, request): return Country.objects.filter(name__icontains=q) def get_result(self, obj): return str(obj) def format_match(self, obj): return", "Event, Prize, Runner, SpeedRun, ) \"\"\" In order to use these lookups properly", "Country, CountryRegion, Donation, Donor, Event, Prize, Runner, SpeedRun, ) \"\"\" In order to", "source_model): # Presumably, we don't want to add countries typically return False class", "= CountryRegion def get_query(self, q, request): return CountryRegion.objects.filter( Q(name__icontains=q) | Q(country__name__icontains=q) ) def", "DonationLookup(GenericLookup): model = Donation useLock = True class DonorLookup(GenericLookup): model = Donor class", "href=\"{0}\">{1}</a>'.format( reverse( 'admin:tracker_{0}_change'.format(obj._meta.model_name), args=[obj.pk] ), escape(str(obj)), ) return mark_safe(result) class BidLookup(GenericLookup): useLock =", "useLock = False extra_params = {} def get_extra_params(self, request): return self.extra_params def get_query(self,", "tracker.search_filters as filters from tracker.models import ( Bid, Country, CountryRegion, Donation, Donor, Event,", "user, source_model): # avoid in-line addition of users by accident return False class", "raise PermissionDenied return self.model.objects.filter(username__icontains=q) def get_result(self, obj): return obj.username def format_match(self, obj): return", "PermissionDenied return self.model.objects.filter(username__icontains=q) def get_result(self, obj): return obj.username def format_match(self, obj): return escape(obj.username)", "def format_match(self, obj): return escape(str(obj)) class GenericLookup(LookupChannel): useLock = False extra_params = {}", "request): params = {'q': q} params.update(self.get_extra_params(request)) model = getattr(self, 'modelName', self.model) if self.useLock", "*args, **kwargs): self.model = get_user_model() 
super(UserLookup, self).__init__(*args, **kwargs) def get_query(self, q, request): if", "str(obj) def format_match(self, obj): return escape(str(obj)) class GenericLookup(LookupChannel): useLock = False extra_params =", "args=[obj.pk] ), escape(str(obj)), ) return mark_safe(result) class BidLookup(GenericLookup): useLock = True model =", "In order to use these lookups properly with the admin, you will need", "PermissionDenied from django.db.models import Q from django.urls import reverse from django.utils.html import escape", "import LookupChannel from django.contrib.auth import get_user_model from django.core.exceptions import PermissionDenied from django.db.models import", "typically return False class CountryRegionLookup(LookupChannel): model = CountryRegion def get_query(self, q, request): return", "def get_query(self, q, request): return Country.objects.filter(name__icontains=q) def get_result(self, obj): return str(obj) def format_match(self,", "q, request): params = {'q': q} params.update(self.get_extra_params(request)) model = getattr(self, 'modelName', self.model) if", "\"\"\" In order to use these lookups properly with the admin, you will", "can_add(self, user, source_model): # Presumably, we don't want to add countries typically return", "django.urls import reverse from django.utils.html import escape from django.utils.safestring import mark_safe import tracker.search_filters", "self).__init__(*args, **kwargs) def get_query(self, q, request): if not request.user.has_perm('tracker.can_search'): raise PermissionDenied return self.model.objects.filter(username__icontains=q)", "these lookups properly with the admin, you will need to install/enable the 'ajax_select'", "request): return Country.objects.filter(name__icontains=q) def get_result(self, obj): return str(obj) def format_match(self, obj): return escape(str(obj))", "if not request.user.has_perm('tracker.can_search'): raise PermissionDenied return self.model.objects.filter(username__icontains=q) def 
get_result(self, obj): return obj.username def", "the admin, you will need to install/enable the 'ajax_select' django module, and also", "source_model): # avoid in-line addition of users by accident return False class CountryLookup(LookupChannel):", "non-admin search should be different) def format_item_display(self, obj): result = '<a href=\"{0}\">{1}</a>'.format( reverse(", "'bidtarget' useLock = True extra_params = {'feed': 'all'} class DonationLookup(GenericLookup): model = Donation", "import reverse from django.utils.html import escape from django.utils.safestring import mark_safe import tracker.search_filters as", "return CountryRegion.objects.filter( Q(name__icontains=q) | Q(country__name__icontains=q) ) def get_result(self, obj): return str(obj) def format_match(self,", "Bid modelName = 'bidtarget' useLock = True extra_params = {'feed': 'all'} class DonationLookup(GenericLookup):", "applications (which we should do regardless since # non-admin search should be different)", "= 'allbids' extra_params = {'feed': 'all'} class BidTargetLookup(GenericLookup): model = Bid modelName =", "obj): return escape(str(obj)) # returning the admin URL reduces the genericity of our", "(the table of all lookups used by this application are in tracker/ajax_lookup_channels.py) They", "= Country def get_query(self, q, request): return Country.objects.filter(name__icontains=q) def get_result(self, obj): return str(obj)", "AJAX_LOOKUP_CHANNELS \"\"\" class UserLookup(LookupChannel): def __init__(self, *args, **kwargs): self.model = get_user_model() super(UserLookup, self).__init__(*args,", "def get_result(self, obj): return str(obj) def format_match(self, obj): return escape(str(obj)) class GenericLookup(LookupChannel): useLock", "False return filters.run_model_query(model, params, request.user) def get_result(self, obj): return str(obj) def format_match(self, obj):", "= True class EventLookup(GenericLookup): model = Event useLock = True class RunnerLookup(GenericLookup): model", 
"for admin/non-admin applications (which we should do regardless since # non-admin search should", "django.core.exceptions import PermissionDenied from django.db.models import Q from django.urls import reverse from django.utils.html", "Presumably, we don't want to add countries typically return False class CountryRegionLookup(LookupChannel): model", "admin/non-admin applications (which we should do regardless since # non-admin search should be", "True model = Bid modelName = 'allbids' extra_params = {'feed': 'all'} class BidTargetLookup(GenericLookup):", "BidLookup(GenericLookup): useLock = True model = Bid modelName = 'bid' extra_params = {'feed':", "# non-admin search should be different) def format_item_display(self, obj): result = '<a href=\"{0}\">{1}</a>'.format(", "{} def get_extra_params(self, request): return self.extra_params def get_query(self, q, request): params = {'q':", "used by this application are in tracker/ajax_lookup_channels.py) They can be imported with the", "do regardless since # non-admin search should be different) def format_item_display(self, obj): result", "Country.objects.filter(name__icontains=q) def get_result(self, obj): return str(obj) def format_match(self, obj): return escape(str(obj)) def can_add(self,", "obj): return str(obj) def format_match(self, obj): return escape(str(obj)) def can_add(self, user, source_model): #", "BidTargetLookup(GenericLookup): model = Bid modelName = 'bidtarget' useLock = True extra_params = {'feed':", "format_match(self, obj): return escape(obj.username) def can_add(self, user, source_model): # avoid in-line addition of", "from tracker.models import ( Bid, Country, CountryRegion, Donation, Donor, Event, Prize, Runner, SpeedRun,", "escape(obj.username) def can_add(self, user, source_model): # avoid in-line addition of users by accident", "this can be solved # by using distinct lookups for admin/non-admin applications (which", "self.model = get_user_model() super(UserLookup, self).__init__(*args, 
**kwargs) def get_query(self, q, request): if not request.user.has_perm('tracker.can_search'):", "bit, but this can be solved # by using distinct lookups for admin/non-admin", "reverse( 'admin:tracker_{0}_change'.format(obj._meta.model_name), args=[obj.pk] ), escape(str(obj)), ) return mark_safe(result) class BidLookup(GenericLookup): useLock = True", "search should be different) def format_item_display(self, obj): result = '<a href=\"{0}\">{1}</a>'.format( reverse( 'admin:tracker_{0}_change'.format(obj._meta.model_name),", "= Donation useLock = True class DonorLookup(GenericLookup): model = Donor class PrizeLookup(GenericLookup): model", "users by accident return False class CountryLookup(LookupChannel): model = Country def get_query(self, q,", "model = Bid modelName = 'bidtarget' useLock = True extra_params = {'feed': 'all'}", "def __init__(self, *args, **kwargs): self.model = get_user_model() super(UserLookup, self).__init__(*args, **kwargs) def get_query(self, q,", "get_query(self, q, request): if not request.user.has_perm('tracker.can_search'): raise PermissionDenied return self.model.objects.filter(username__icontains=q) def get_result(self, obj):", "class EventLookup(GenericLookup): model = Event useLock = True class RunnerLookup(GenericLookup): model = Runner", "modelName = 'bid' extra_params = {'feed': 'all'} class AllBidLookup(GenericLookup): useLock = True model", "admin URL reduces the genericity of our solution a little bit, but this", "useLock = True class EventLookup(GenericLookup): model = Event useLock = True class RunnerLookup(GenericLookup):", "False class CountryRegionLookup(LookupChannel): model = CountryRegion def get_query(self, q, request): return CountryRegion.objects.filter( Q(name__icontains=q)", "getattr(self, 'modelName', self.model) if self.useLock and not request.user.has_perm('tracker.can_edit_locked_events'): params['locked'] = False return filters.run_model_query(model,", "in-line addition of users by accident return False class 
CountryLookup(LookupChannel): model = Country", "escape(str(obj)) def can_add(self, user, source_model): # Presumably, we don't want to add countries", ") return mark_safe(result) class BidLookup(GenericLookup): useLock = True model = Bid modelName =", "= {'feed': 'all'} class AllBidLookup(GenericLookup): useLock = True model = Bid modelName =", "get_user_model() super(UserLookup, self).__init__(*args, **kwargs) def get_query(self, q, request): if not request.user.has_perm('tracker.can_search'): raise PermissionDenied", "extra_params = {} def get_extra_params(self, request): return self.extra_params def get_query(self, q, request): params", "extra_params = {'feed': 'all'} class BidTargetLookup(GenericLookup): model = Bid modelName = 'bidtarget' useLock", "= True extra_params = {'feed': 'all'} class DonationLookup(GenericLookup): model = Donation useLock =", "model = CountryRegion def get_query(self, q, request): return CountryRegion.objects.filter( Q(name__icontains=q) | Q(country__name__icontains=q) )", "'allbids' extra_params = {'feed': 'all'} class BidTargetLookup(GenericLookup): model = Bid modelName = 'bidtarget'", "if self.useLock and not request.user.has_perm('tracker.can_edit_locked_events'): params['locked'] = False return filters.run_model_query(model, params, request.user) def", "= 'bid' extra_params = {'feed': 'all'} class AllBidLookup(GenericLookup): useLock = True model =", "return self.extra_params def get_query(self, q, request): params = {'q': q} params.update(self.get_extra_params(request)) model =", "params, request.user) def get_result(self, obj): return str(obj) def format_match(self, obj): return escape(str(obj)) #", "DonorLookup(GenericLookup): model = Donor class PrizeLookup(GenericLookup): model = Prize class RunLookup(GenericLookup): model =", "class BidLookup(GenericLookup): useLock = True model = Bid modelName = 'bid' extra_params =", "django.utils.html import escape from django.utils.safestring import mark_safe import 
tracker.search_filters as filters from tracker.models", "**kwargs): self.model = get_user_model() super(UserLookup, self).__init__(*args, **kwargs) def get_query(self, q, request): if not", "def get_result(self, obj): return str(obj) def format_match(self, obj): return escape(str(obj)) # returning the", "= {} def get_extra_params(self, request): return self.extra_params def get_query(self, q, request): params =", "# returning the admin URL reduces the genericity of our solution a little", "properly with the admin, you will need to install/enable the 'ajax_select' django module,", "# Presumably, we don't want to add countries typically return False class CountryRegionLookup(LookupChannel):", "format_match(self, obj): return escape(str(obj)) # returning the admin URL reduces the genericity of", "= '<a href=\"{0}\">{1}</a>'.format( reverse( 'admin:tracker_{0}_change'.format(obj._meta.model_name), args=[obj.pk] ), escape(str(obj)), ) return mark_safe(result) class BidLookup(GenericLookup):", "solution a little bit, but this can be solved # by using distinct", "str(obj) def format_match(self, obj): return escape(str(obj)) # returning the admin URL reduces the", "= Bid modelName = 'allbids' extra_params = {'feed': 'all'} class BidTargetLookup(GenericLookup): model =", "return escape(str(obj)) # returning the admin URL reduces the genericity of our solution", "CountryRegion def get_query(self, q, request): return CountryRegion.objects.filter( Q(name__icontains=q) | Q(country__name__icontains=q) ) def get_result(self,", "CountryRegion.objects.filter( Q(name__icontains=q) | Q(country__name__icontains=q) ) def get_result(self, obj): return str(obj) def format_match(self, obj):", "with the line: from tracker.ajax_lookup_channels import AJAX_LOOKUP_CHANNELS \"\"\" class UserLookup(LookupChannel): def __init__(self, *args,", "q} params.update(self.get_extra_params(request)) model = getattr(self, 'modelName', self.model) if self.useLock and not 
request.user.has_perm('tracker.can_edit_locked_events'): params['locked']", "LookupChannel from django.contrib.auth import get_user_model from django.core.exceptions import PermissionDenied from django.db.models import Q", "can be solved # by using distinct lookups for admin/non-admin applications (which we", "return filters.run_model_query(model, params, request.user) def get_result(self, obj): return str(obj) def format_match(self, obj): return", "return self.model.objects.filter(username__icontains=q) def get_result(self, obj): return obj.username def format_match(self, obj): return escape(obj.username) def", "True class EventLookup(GenericLookup): model = Event useLock = True class RunnerLookup(GenericLookup): model =", "use these lookups properly with the admin, you will need to install/enable the", "model = Country def get_query(self, q, request): return Country.objects.filter(name__icontains=q) def get_result(self, obj): return", "returning the admin URL reduces the genericity of our solution a little bit,", "our solution a little bit, but this can be solved # by using", "distinct lookups for admin/non-admin applications (which we should do regardless since # non-admin", "useLock = True model = Bid modelName = 'bid' extra_params = {'feed': 'all'}", "install/enable the 'ajax_select' django module, and also add an AJAX_LOOKUP_CHANNELS table (the table", "obj): return escape(str(obj)) def can_add(self, user, source_model): # Presumably, we don't want to", "request): return self.extra_params def get_query(self, q, request): params = {'q': q} params.update(self.get_extra_params(request)) model", "from django.utils.safestring import mark_safe import tracker.search_filters as filters from tracker.models import ( Bid,", "with the admin, you will need to install/enable the 'ajax_select' django module, and", "GenericLookup(LookupChannel): useLock = False extra_params = {} def get_extra_params(self, request): return self.extra_params def", "def get_result(self, obj): 
return str(obj) def format_match(self, obj): return escape(str(obj)) def can_add(self, user,", "get_user_model from django.core.exceptions import PermissionDenied from django.db.models import Q from django.urls import reverse", "return str(obj) def format_match(self, obj): return escape(str(obj)) def can_add(self, user, source_model): # Presumably,", "self.useLock and not request.user.has_perm('tracker.can_edit_locked_events'): params['locked'] = False return filters.run_model_query(model, params, request.user) def get_result(self,", "params = {'q': q} params.update(self.get_extra_params(request)) model = getattr(self, 'modelName', self.model) if self.useLock and", "import ( Bid, Country, CountryRegion, Donation, Donor, Event, Prize, Runner, SpeedRun, ) \"\"\"", "modelName = 'allbids' extra_params = {'feed': 'all'} class BidTargetLookup(GenericLookup): model = Bid modelName", "model = Bid modelName = 'allbids' extra_params = {'feed': 'all'} class BidTargetLookup(GenericLookup): model", "addition of users by accident return False class CountryLookup(LookupChannel): model = Country def", "ajax_select import LookupChannel from django.contrib.auth import get_user_model from django.core.exceptions import PermissionDenied from django.db.models", "(which we should do regardless since # non-admin search should be different) def", "different) def format_item_display(self, obj): result = '<a href=\"{0}\">{1}</a>'.format( reverse( 'admin:tracker_{0}_change'.format(obj._meta.model_name), args=[obj.pk] ), escape(str(obj)),", "the genericity of our solution a little bit, but this can be solved", "want to add countries typically return False class CountryRegionLookup(LookupChannel): model = CountryRegion def", "filters.run_model_query(model, params, request.user) def get_result(self, obj): return str(obj) def format_match(self, obj): return escape(str(obj))", "accident return False class CountryLookup(LookupChannel): model = Country def get_query(self, q, request): return", 
"obj): return str(obj) def format_match(self, obj): return escape(str(obj)) # returning the admin URL", "str(obj) def format_match(self, obj): return escape(str(obj)) def can_add(self, user, source_model): # Presumably, we", "by using distinct lookups for admin/non-admin applications (which we should do regardless since", "tracker/ajax_lookup_channels.py) They can be imported with the line: from tracker.ajax_lookup_channels import AJAX_LOOKUP_CHANNELS \"\"\"", "{'feed': 'all'} class DonationLookup(GenericLookup): model = Donation useLock = True class DonorLookup(GenericLookup): model", "be solved # by using distinct lookups for admin/non-admin applications (which we should", "imported with the line: from tracker.ajax_lookup_channels import AJAX_LOOKUP_CHANNELS \"\"\" class UserLookup(LookupChannel): def __init__(self,", "= False return filters.run_model_query(model, params, request.user) def get_result(self, obj): return str(obj) def format_match(self,", "obj): return escape(obj.username) def can_add(self, user, source_model): # avoid in-line addition of users", "class RunLookup(GenericLookup): model = SpeedRun useLock = True class EventLookup(GenericLookup): model = Event", "django.utils.safestring import mark_safe import tracker.search_filters as filters from tracker.models import ( Bid, Country,", "model = Donation useLock = True class DonorLookup(GenericLookup): model = Donor class PrizeLookup(GenericLookup):", "return False class CountryLookup(LookupChannel): model = Country def get_query(self, q, request): return Country.objects.filter(name__icontains=q)", "def format_match(self, obj): return escape(str(obj)) # returning the admin URL reduces the genericity", "user, source_model): # Presumably, we don't want to add countries typically return False", "Prize class RunLookup(GenericLookup): model = SpeedRun useLock = True class EventLookup(GenericLookup): model =", "def can_add(self, user, source_model): # avoid in-line addition of users by accident return", 
"solved # by using distinct lookups for admin/non-admin applications (which we should do", "mark_safe import tracker.search_filters as filters from tracker.models import ( Bid, Country, CountryRegion, Donation,", "of our solution a little bit, but this can be solved # by", "Q(country__name__icontains=q) ) def get_result(self, obj): return str(obj) def format_match(self, obj): return escape(str(obj)) class", "and also add an AJAX_LOOKUP_CHANNELS table (the table of all lookups used by", "PrizeLookup(GenericLookup): model = Prize class RunLookup(GenericLookup): model = SpeedRun useLock = True class", "django.db.models import Q from django.urls import reverse from django.utils.html import escape from django.utils.safestring", "import tracker.search_filters as filters from tracker.models import ( Bid, Country, CountryRegion, Donation, Donor,", "lookups used by this application are in tracker/ajax_lookup_channels.py) They can be imported with", "AllBidLookup(GenericLookup): useLock = True model = Bid modelName = 'allbids' extra_params = {'feed':", "need to install/enable the 'ajax_select' django module, and also add an AJAX_LOOKUP_CHANNELS table", "), escape(str(obj)), ) return mark_safe(result) class BidLookup(GenericLookup): useLock = True model = Bid", "get_result(self, obj): return str(obj) def format_match(self, obj): return escape(str(obj)) def can_add(self, user, source_model):", "'all'} class DonationLookup(GenericLookup): model = Donation useLock = True class DonorLookup(GenericLookup): model =", "tracker.models import ( Bid, Country, CountryRegion, Donation, Donor, Event, Prize, Runner, SpeedRun, )", "format_match(self, obj): return escape(str(obj)) def can_add(self, user, source_model): # Presumably, we don't want", "request.user.has_perm('tracker.can_edit_locked_events'): params['locked'] = False return filters.run_model_query(model, params, request.user) def get_result(self, obj): return str(obj)", "result = '<a href=\"{0}\">{1}</a>'.format( reverse( 
'admin:tracker_{0}_change'.format(obj._meta.model_name), args=[obj.pk] ), escape(str(obj)), ) return mark_safe(result) class", "model = SpeedRun useLock = True class EventLookup(GenericLookup): model = Event useLock =", "**kwargs) def get_query(self, q, request): if not request.user.has_perm('tracker.can_search'): raise PermissionDenied return self.model.objects.filter(username__icontains=q) def", ") \"\"\" In order to use these lookups properly with the admin, you", "import Q from django.urls import reverse from django.utils.html import escape from django.utils.safestring import", "get_query(self, q, request): params = {'q': q} params.update(self.get_extra_params(request)) model = getattr(self, 'modelName', self.model)", "True class DonorLookup(GenericLookup): model = Donor class PrizeLookup(GenericLookup): model = Prize class RunLookup(GenericLookup):", "def format_match(self, obj): return escape(obj.username) def can_add(self, user, source_model): # avoid in-line addition", "can be imported with the line: from tracker.ajax_lookup_channels import AJAX_LOOKUP_CHANNELS \"\"\" class UserLookup(LookupChannel):", "'modelName', self.model) if self.useLock and not request.user.has_perm('tracker.can_edit_locked_events'): params['locked'] = False return filters.run_model_query(model, params,", "modelName = 'bidtarget' useLock = True extra_params = {'feed': 'all'} class DonationLookup(GenericLookup): model", "self.model.objects.filter(username__icontains=q) def get_result(self, obj): return obj.username def format_match(self, obj): return escape(obj.username) def can_add(self,", "= True class DonorLookup(GenericLookup): model = Donor class PrizeLookup(GenericLookup): model = Prize class", "\"\"\" class UserLookup(LookupChannel): def __init__(self, *args, **kwargs): self.model = get_user_model() super(UserLookup, self).__init__(*args, **kwargs)", "request.user.has_perm('tracker.can_search'): raise PermissionDenied return self.model.objects.filter(username__icontains=q) def 
get_result(self, obj): return obj.username def format_match(self, obj):", "line: from tracker.ajax_lookup_channels import AJAX_LOOKUP_CHANNELS \"\"\" class UserLookup(LookupChannel): def __init__(self, *args, **kwargs): self.model", "= {'q': q} params.update(self.get_extra_params(request)) model = getattr(self, 'modelName', self.model) if self.useLock and not", "from django.contrib.auth import get_user_model from django.core.exceptions import PermissionDenied from django.db.models import Q from", "be different) def format_item_display(self, obj): result = '<a href=\"{0}\">{1}</a>'.format( reverse( 'admin:tracker_{0}_change'.format(obj._meta.model_name), args=[obj.pk] ),", "the line: from tracker.ajax_lookup_channels import AJAX_LOOKUP_CHANNELS \"\"\" class UserLookup(LookupChannel): def __init__(self, *args, **kwargs):", "django module, and also add an AJAX_LOOKUP_CHANNELS table (the table of all lookups" ]
[ "if value & 1: temp = -temp data = data[:-3] id = int(data,", "data = data[:-2] humidity = int(data[-2:], 16) data = data[:-2] value = int(data[-3:],", "data[:-2] humidity = int(data[-2:], 16) data = data[:-2] value = int(data[-3:], 16) temp", "= data[:-2] value = int(data[-3:], 16) temp = (value & 0x7FF) / 10", "value >>= 11 if value & 1: temp = -temp data = data[:-3]", "dict( packet, model=\"temperaturehumidity\", sensorId=id, data=dict(humidity=humidity, temp=temp), ) else: return dict( packet, model=\"temperature\", sensorId=id,", "packet, model=\"temperaturehumidity\", sensorId=id, data=dict(humidity=humidity, temp=temp), ) else: return dict( packet, model=\"temperature\", sensorId=id, data=dict(temp=temp)", "data = data[:-3] id = int(data, 16) & 0xFF if humidity <= 100:", "int(data[-2:], 16) data = data[:-2] value = int(data[-3:], 16) temp = (value &", ">>= 11 if value & 1: temp = -temp data = data[:-3] id", "temp = -temp data = data[:-3] id = int(data, 16) & 0xFF if", "= int(data[-2:], 16) data = data[:-2] value = int(data[-3:], 16) temp = (value", "2.6 \"\"\" data = packet[\"data\"] data = \"%010x\" % int(data) data = data[:-2]", "return dict( packet, model=\"temperaturehumidity\", sensorId=id, data=dict(humidity=humidity, temp=temp), ) else: return dict( packet, model=\"temperature\",", "humidity = int(data[-2:], 16) data = data[:-2] value = int(data[-3:], 16) temp =", "model=\"temperaturehumidity\", sensorId=id, data=dict(humidity=humidity, temp=temp), ) else: return dict( packet, model=\"temperature\", sensorId=id, data=dict(temp=temp) )", "100: return dict( packet, model=\"temperaturehumidity\", sensorId=id, data=dict(humidity=humidity, temp=temp), ) else: return dict( packet,", "int(data, 16) & 0xFF if humidity <= 100: return dict( packet, model=\"temperaturehumidity\", sensorId=id,", "= int(data, 16) & 0xFF if humidity <= 100: return dict( packet, model=\"temperaturehumidity\",", "16) data = data[:-2] value = int(data[-3:], 16) temp = (value & 
0x7FF)", "decode(packet): \"\"\" https://github.com/telldus/telldus/blob/master/telldus-core/service/ProtocolFineoffset.cpp >>> decode(dict(data=0x48801aff05))[\"data\"][\"temp\"] 2.6 \"\"\" data = packet[\"data\"] data = \"%010x\"", "10 value >>= 11 if value & 1: temp = -temp data =", "= \"%010x\" % int(data) data = data[:-2] humidity = int(data[-2:], 16) data =", "= data[:-3] id = int(data, 16) & 0xFF if humidity <= 100: return", "= -temp data = data[:-3] id = int(data, 16) & 0xFF if humidity", "temp = (value & 0x7FF) / 10 value >>= 11 if value &", ">>> decode(dict(data=0x48801aff05))[\"data\"][\"temp\"] 2.6 \"\"\" data = packet[\"data\"] data = \"%010x\" % int(data) data", "= int(data[-3:], 16) temp = (value & 0x7FF) / 10 value >>= 11", "value & 1: temp = -temp data = data[:-3] id = int(data, 16)", "data[:-3] id = int(data, 16) & 0xFF if humidity <= 100: return dict(", "\"\"\" data = packet[\"data\"] data = \"%010x\" % int(data) data = data[:-2] humidity", "value = int(data[-3:], 16) temp = (value & 0x7FF) / 10 value >>=", "decode(dict(data=0x48801aff05))[\"data\"][\"temp\"] 2.6 \"\"\" data = packet[\"data\"] data = \"%010x\" % int(data) data =", "= packet[\"data\"] data = \"%010x\" % int(data) data = data[:-2] humidity = int(data[-2:],", "0x7FF) / 10 value >>= 11 if value & 1: temp = -temp", "0xFF if humidity <= 100: return dict( packet, model=\"temperaturehumidity\", sensorId=id, data=dict(humidity=humidity, temp=temp), )", "<gh_stars>10-100 def decode(packet): \"\"\" https://github.com/telldus/telldus/blob/master/telldus-core/service/ProtocolFineoffset.cpp >>> decode(dict(data=0x48801aff05))[\"data\"][\"temp\"] 2.6 \"\"\" data = packet[\"data\"] data", "int(data) data = data[:-2] humidity = int(data[-2:], 16) data = data[:-2] value =", "16) temp = (value & 0x7FF) / 10 value >>= 11 if value", "def decode(packet): \"\"\" https://github.com/telldus/telldus/blob/master/telldus-core/service/ProtocolFineoffset.cpp >>> 
decode(dict(data=0x48801aff05))[\"data\"][\"temp\"] 2.6 \"\"\" data = packet[\"data\"] data =", "if humidity <= 100: return dict( packet, model=\"temperaturehumidity\", sensorId=id, data=dict(humidity=humidity, temp=temp), ) else:", "(value & 0x7FF) / 10 value >>= 11 if value & 1: temp", "humidity <= 100: return dict( packet, model=\"temperaturehumidity\", sensorId=id, data=dict(humidity=humidity, temp=temp), ) else: return", "& 1: temp = -temp data = data[:-3] id = int(data, 16) &", "= (value & 0x7FF) / 10 value >>= 11 if value & 1:", "-temp data = data[:-3] id = int(data, 16) & 0xFF if humidity <=", "id = int(data, 16) & 0xFF if humidity <= 100: return dict( packet,", "= data[:-2] humidity = int(data[-2:], 16) data = data[:-2] value = int(data[-3:], 16)", "data = data[:-2] value = int(data[-3:], 16) temp = (value & 0x7FF) /", "data = packet[\"data\"] data = \"%010x\" % int(data) data = data[:-2] humidity =", "packet[\"data\"] data = \"%010x\" % int(data) data = data[:-2] humidity = int(data[-2:], 16)", "16) & 0xFF if humidity <= 100: return dict( packet, model=\"temperaturehumidity\", sensorId=id, data=dict(humidity=humidity,", "data[:-2] value = int(data[-3:], 16) temp = (value & 0x7FF) / 10 value", "11 if value & 1: temp = -temp data = data[:-3] id =", "1: temp = -temp data = data[:-3] id = int(data, 16) & 0xFF", "int(data[-3:], 16) temp = (value & 0x7FF) / 10 value >>= 11 if", "& 0xFF if humidity <= 100: return dict( packet, model=\"temperaturehumidity\", sensorId=id, data=dict(humidity=humidity, temp=temp),", "\"\"\" https://github.com/telldus/telldus/blob/master/telldus-core/service/ProtocolFineoffset.cpp >>> decode(dict(data=0x48801aff05))[\"data\"][\"temp\"] 2.6 \"\"\" data = packet[\"data\"] data = \"%010x\" %", "<= 100: return dict( packet, model=\"temperaturehumidity\", sensorId=id, data=dict(humidity=humidity, temp=temp), ) else: return dict(", "https://github.com/telldus/telldus/blob/master/telldus-core/service/ProtocolFineoffset.cpp >>> 
decode(dict(data=0x48801aff05))[\"data\"][\"temp\"] 2.6 \"\"\" data = packet[\"data\"] data = \"%010x\" % int(data)", "/ 10 value >>= 11 if value & 1: temp = -temp data", "\"%010x\" % int(data) data = data[:-2] humidity = int(data[-2:], 16) data = data[:-2]", "& 0x7FF) / 10 value >>= 11 if value & 1: temp =", "% int(data) data = data[:-2] humidity = int(data[-2:], 16) data = data[:-2] value", "data = \"%010x\" % int(data) data = data[:-2] humidity = int(data[-2:], 16) data" ]
[ "in files: progress_bar.update(1) images.append({ 'subset': subset, 'class_name': class_name, 'filepath': os.path.join(root, f) }) #", "index - len(dataset) raise(ValueError, f'index exceeds total number of instances, index {index}') def", "f in files: progress_bar.update(1) images.append({ 'subset': subset, 'alphabet': alphabet, 'class_name': class_name, 'filepath': os.path.join(root,", "# 3 014.Indigo_Bunting ... 3 0 # 4 014.Indigo_Bunting ... 4 0 #", "dataset to ordered 0-(num_speakers - 1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [20] #", "set target: which dataset to represent \"\"\" if subset not in ('background', 'evaluation'):", "skimage import io from tqdm import tqdm import pandas as pd import numpy", "9: 0, 10: 0, ...} # Setup transforms self.transform = transforms.Compose([ transforms.CenterCrop(224), transforms.Resize(84),", "= self.df.assign(id=self.df.index.values) def __len__(self): return self.samples_per_class * self.n_classes def __getitem__(self, item): class_id =", "else: index = index - len(dataset) raise(ValueError, f'index exceeds total number of instances,", "... 2 0 # 3 n01770081 ... 3 0 # 4 n01770081 ...", "def __len__(self): return sum([len(dataset) for dataset in self.dataset_list]) def num_classes(self): sum([dataset.num_classes() for dataset", "folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)): subset_len += len([f for f in", "... 0 0 # 1 n01770081 ... 1 0 # 2 n01770081 ...", "sorted(self.df['class_name'].unique()) # [16] # ['014.Indigo_Bunting', '042.Vermilion_Flycatcher', '051.Horned_Grebe', ...] self.class_name_to_id = {self.unique_characters[i]: i for", "the dataset n_classes: Number of distinct classes in the dataset n_features: Number of", "= (instance - instance.min()) / (instance.max() - instance.min()) label = self.datasetid_to_class_id[item] # from", "# 2 014.Indigo_Bunting ... 2 0 # 3 014.Indigo_Bunting ... 
3 0 #", "@staticmethod def index_subset(subset): \"\"\"Index a subset by looping through all of its files", "pass to find total for tqdm bar subset_len = 0 for root, folders,", "# 3 Angelic.0 Angelic.0.character01 ... 3 0 # 4 Angelic.0 Angelic.0.character01 ... 4", "subset_len = 0 for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): subset_len +=", "for root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): if len(files) == 0: continue", "subset, 'class_name': class_name, 'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg progress_bar.close() return images", "tqdm import pandas as pd import numpy as np import os from typing", "be one of (CUB_Bird, DTD_Texture, FGVC_Aircraft, FGVCx_Fungi)') self.subset = subset self.target = target", "correspondence to item in dataset self.df = self.df.assign(id=self.df.index.values) # Convert arbitrary class names", "in self.dataset_list: datasetid_to_class_id.update( dict(zip(map(lambda id: id + index_offset, dataset.datasetid_to_class_id.keys()), map(lambda class_id: class_id +", "in range(self.num_classes())} # {dict: 16} # {'014.Indigo_Bunting': 0, '042.Vermilion_Flycatcher': 1, '051.Horned_Grebe': 2, ...}", "'042.Vermilion_Flycatcher', '051.Horned_Grebe', ...] self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict:", "the remaining features are the class index. 
# Arguments samples_per_class: Number of samples", "dataset \"\"\" images = [] print('Indexing {}...{}...'.format(target, subset)) folder_name = 'train' if subset", "self.n_classes def __getitem__(self, item): class_id = item % self.n_classes return np.array([item] + [class_id]*self.n_features,", "import DATA_PATH class OmniglotDataset(Dataset): def __init__(self, subset, OOD_test=False): \"\"\"Dataset class representing Omniglot dataset", "dataset # Arguments: subset: Whether the dataset represents the background or evaluation set", "# int return instance, label def __len__(self): return sum([len(dataset) for dataset in self.dataset_list])", "return dataset_id, index else: index = index - len(dataset) raise(ValueError, f'index exceeds total", "dataset_id, index = self.index_mapping(item) instance, true_label = self.dataset_list[dataset_id][index] # true_label is the label", "n_features=1): \"\"\"Dummy dataset for debugging/testing purposes A sample from the DummyDataset has (n_features", "# [28, 28] # Reindex to channels first format as supported by pytorch", "... 1 0 # 2 014.Indigo_Bunting ... 2 0 # 3 014.Indigo_Bunting ...", "instance, true_label = self.dataset_list[dataset_id][index] # true_label is the label in sub-dataset label =", "images class DummyDataset(Dataset): def __init__(self, samples_per_class=10, n_classes=10, n_features=1): \"\"\"Dummy dataset for debugging/testing purposes", "index = self.index_mapping(item) instance, true_label = self.dataset_list[dataset_id][index] # true_label is the label in", "information. 
# Arguments subset: Name of the subset # Returns A list of", "dataset dataset \"\"\" images = [] print('Indexing {}...'.format(subset)) # Quick first pass to", "# {dict: 960} # {0: '//10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 960}", "the miniImageNet dataset \"\"\" images = [] print('Indexing {}...{}...'.format(target, subset)) folder_name = 'train'", "6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12:", "; windows \\\\ # n01770081 for f in files: progress_bar.update(1) images.append({ 'subset': subset,", "\"\"\"Dataset class representing Omniglot dataset # Arguments: subset: Whether the dataset represents the", "\"\"\"Dummy dataset for debugging/testing purposes A sample from the DummyDataset has (n_features +", "purposes A sample from the DummyDataset has (n_features + 1) features. The first", "class_id {DataFrame: (52720, 6)} # 0 Angelic.0 Angelic.0.character01 ... 
0 0 # 1", "files in a particular subset of the Omniglot dataset dataset \"\"\" images =", "return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset): \"\"\"Index a subset by looping through all of", "{'Angelic.0.character01': 0, 'Angelic.0.character02': 1, 'Angelic.0.character03': 2, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) #", "[class_id]*self.n_features, dtype=np.float), float(class_id) class MultiDataset(Dataset): def __init__(self, dataset_list: List[Dataset]): \"\"\"Dataset class representing a", "= pd.DataFrame(self.index_subset(self.subset)) # Index of dataframe has direct correspondence to item in dataset", "def __getitem__(self, item): instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 84x84 instance = self.transform(instance) #", "or evaluation set \"\"\" if subset not in ('background', 'evaluation'): raise(ValueError, 'subset must", "dataset_id, index else: index = index - len(dataset) raise(ValueError, f'index exceeds total number", "= index - len(dataset) raise(ValueError, f'index exceeds total number of instances, index {index}')", "true_label = self.dataset_list[dataset_id][index] # true_label is the label in sub-dataset label = self.datasetid_to_class_id[item]", "dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 12000} # {0: '//10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg', ...} self.datasetid_to_class_id =", "@staticmethod def index_subset(subset, target): \"\"\"Index a subset by looping through all of its", "= samples_per_class self.n_classes = n_classes self.n_features = n_features # Create a dataframe to", "= 'train' if subset == 'background' else 'val' # Quick first pass to", "(CUB_Bird, DTD_Texture, FGVC_Aircraft, FGVCx_Fungi)') self.subset = subset self.target = target self.df = pd.DataFrame(self.index_subset(self.subset,", "= n_features # Create a dataframe to be 
consistent with other Datasets self.df", "Number of distinct classes in the dataset n_features: Number of extra features each", "images.append({ 'subset': subset, 'class_name': class_name, 'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg progress_bar.close()", "dataset_list self.datasetid_to_class_id = self.label_mapping() def index_mapping(self, index) -> (int, int): \"\"\" A mapping", "= OOD_test self.df = pd.DataFrame(self.index_subset(self.subset)) # Index of dataframe has direct correspondence to", "to the index in the corresponding dataset. :param index: :return: dataset_id, item \"\"\"", "# {dict: 52720} # {0: 0, 1: 0, 2: 0, 3: 0, 4:", "Whether the dataset represents the background or evaluation set target: which dataset to", "dataframe has direct correspondence to item in dataset self.df = self.df.assign(id=self.df.index.values) # Convert", "{self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 2636} # {'Angelic.0.character01': 0, 'Angelic.0.character02':", ":, :] # [1, 28, 28] # Normalise to 0-1 instance = (instance", "...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # class_name filepath subset id class_id {MiniImageNet:", "self.datasetid_to_class_id[item] # int return instance, label def __len__(self): return sum([len(dataset) for dataset in", "root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): if len(files) == 0: continue alphabet", "= self.df.to_dict()['class_id'] # {dict: 12000} # {0: 0, 1: 0, 2: 0, 3:", "self.df = pd.DataFrame({ 'class_id': [i % self.n_classes for i in range(len(self))] }) self.df", "014.Indigo_Bunting for f in files: progress_bar.update(1) images.append({ 'subset': subset, 'class_name': class_name, 'filepath': os.path.join(root,", "for i in range(self.num_classes())} # {dict: 20} # {'n01770081': 0, 'n02101006': 1, 
'n02108551':", "JpegImageFile, 500x384 instance = self.transform(instance) # [3, 84, 84] label = self.datasetid_to_class_id[item] #", "PIL import Image from torchvision import transforms from skimage import io from tqdm", "samples_per_class self.n_classes = n_classes self.n_features = n_features # Create a dataframe to be", "direct correspondence to item in dataset self.df = self.df.assign(id=self.df.index.values) # Convert arbitrary class", "# Reindex to channels first format as supported by pytorch instance = instance[np.newaxis,", "-> (int, int): \"\"\" A mapping method to map index (in __getitem__ method)", "as OOD dataset self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), # ToTensor() will normalize to", "7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0, ...}", "ordered 0-(num_speakers - 1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [2636] # ['Angelic.0.character01', 'Angelic.0.character02',", "self.OOD_test: instance = Image.open(self.datasetid_to_filepath[item]) # PNG, 28X28 instance = instance.convert('RGB') instance = self.transform(instance)", "return images class DummyDataset(Dataset): def __init__(self, samples_per_class=10, n_classes=10, n_features=1): \"\"\"Dummy dataset for debugging/testing", "subset_len += len([f for f in files if f.endswith('.png')]) progress_bar = tqdm(total=subset_len) for", "+ '/meta-dataset/{}/{}'.format(target, folder_name)): if len(files) == 0: continue class_name = root.split(os.sep)[-1] # linux", "{dict: 12000} # {0: '//10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 12000} #", "transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def __getitem__(self, item): instance =", "os.path.join(root, f) }) # filepath: 
//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png progress_bar.close() return images class MiniImageNet(Dataset): def __init__(self,", "...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 12000} # {0: 0, 1: 0, 2:", "index_offset, dataset.datasetid_to_class_id.keys()), map(lambda class_id: class_id + class_id_offset, dataset.datasetid_to_class_id.values()))) ) index_offset = index_offset +", "(int, int): \"\"\" A mapping method to map index (in __getitem__ method) to", "2, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # class_name filepath subset id class_id", "evaluation)') self.subset = subset self.OOD_test = OOD_test self.df = pd.DataFrame(self.index_subset(self.subset)) # Index of", "first format as supported by pytorch instance = instance[np.newaxis, :, :] # [1,", "self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # class_name filepath subset id class_id {Bird: 960}", "# 0 Angelic.0 Angelic.0.character01 ... 0 0 # 1 Angelic.0 Angelic.0.character01 ... 1", "0 # 3 014.Indigo_Bunting ... 3 0 # 4 014.Indigo_Bunting ... 
4 0", "DATA_PATH class OmniglotDataset(Dataset): def __init__(self, subset, OOD_test=False): \"\"\"Dataset class representing Omniglot dataset #", "# Setup transforms self.transform = transforms.Compose([ transforms.CenterCrop(224), transforms.Resize(84), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229,", "= root.split(os.sep)[-1] # linux / ; windows \\\\ # n01770081 for f in", "{dict: 52720} # {0: 0, 1: 0, 2: 0, 3: 0, 4: 0,", "OOD_test self.df = pd.DataFrame(self.index_subset(self.subset)) # Index of dataframe has direct correspondence to item", "= [] print('Indexing {}...{}...'.format(target, subset)) folder_name = 'train' if subset == 'background' else", "# {dict: 2636} # {'Angelic.0.character01': 0, 'Angelic.0.character02': 1, 'Angelic.0.character03': 2, ...} self.df =", "... 2 0 # 3 014.Indigo_Bunting ... 3 0 # 4 014.Indigo_Bunting ...", "of dicts containing information about all the image files in a particular subset", "files and recording relevant information. # Arguments subset: Name of the subset #", "f in files if f.endswith('.png')]) progress_bar = tqdm(total=subset_len) for root, folders, files in", "images class MiniImageNet(Dataset): def __init__(self, subset): \"\"\"Dataset class representing miniImageNet dataset # Arguments:", "instance.min()) / (instance.max() - instance.min()) label = self.datasetid_to_class_id[item] # from 0 -> 2636", "index {index}') def label_mapping(self) -> Dict: \"\"\" generate mapping dict from datasetid to", "label = self.datasetid_to_class_id[item] # from 0 -> 16 return instance, label def __len__(self):", "/ ; windows \\\\ # 014.Indigo_Bunting for f in files: progress_bar.update(1) images.append({ 'subset':", "# Arguments: subset: Whether the dataset represents the background or evaluation set \"\"\"", "0 0 # 1 014.Indigo_Bunting ... 1 0 # 2 014.Indigo_Bunting ... 2", "n01770081 ... 0 0 # 1 n01770081 ... 
1 0 # 2 n01770081", "subset_len = 0 for root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)): subset_len", "n_features # Create a dataframe to be consistent with other Datasets self.df =", "subset, 'alphabet': alphabet, 'class_name': class_name, 'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png progress_bar.close()", "{dict: 16} # {'014.Indigo_Bunting': 0, '042.Vermilion_Flycatcher': 1, '051.Horned_Grebe': 2, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda", "class_name filepath subset id class_id {Bird: 960} # 0 014.Indigo_Bunting ... 0 0", "'n02747177', ...] self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 20}", "in the dataset n_classes: Number of distinct classes in the dataset n_features: Number", "class names of dataset to ordered 0-(num_speakers - 1) integers self.unique_characters = sorted(self.df['class_name'].unique())", "3 0 # 4 Angelic.0 Angelic.0.character01 ... 
4 0 # Create dicts self.datasetid_to_filepath", "\\\\ # Angelic.0 class_name = '{}.{}'.format(alphabet, root.split(os.sep)[-1]) # Angelic.0.character01 for f in files:", "Arguments: subset: Whether the dataset represents the 'background' or 'evaluation' set \"\"\" if", "10: 0, ...} # Setup transforms self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), # ToTensor()", "continue class_name = root.split(os.sep)[-1] # linux / ; windows \\\\ # 014.Indigo_Bunting for", "root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)): subset_len += len([f for f", "one of (background, evaluation)') if target not in ('CUB_Bird', 'DTD_Texture', 'FGVC_Aircraft', 'FGVCx_Fungi'): raise(ValueError,", "12000} # {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5:", "dataset_id, item \"\"\" for dataset_id, dataset in enumerate(self.dataset_list): if index < len(dataset): return", "= sorted(self.df['class_name'].unique()) # [16] # ['014.Indigo_Bunting', '042.Vermilion_Flycatcher', '051.Horned_Grebe', ...] self.class_name_to_id = {self.unique_characters[i]: i", "len(self.df) def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset, target): \"\"\"Index a subset by", "subset by looping through all of its files and recording relevant information. #", "subset: Whether the dataset represents the 'background' or 'evaluation' set \"\"\" if subset", "# Arguments: subset: Whether the dataset represents the background or evaluation set target:", "]) def __getitem__(self, item): instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 84x84 instance = self.transform(instance)", "3 n01770081 ... 3 0 # 4 n01770081 ... 
4 0 # Create", "target): \"\"\"Dataset class representing CUB_Bird/DTD_Texture/FGVC_Aircraft/FGVCx_Fungi dataset # Arguments: subset: Whether the dataset represents", "pd.DataFrame(self.index_subset(self.subset)) # Index of dataframe has direct correspondence to item in dataset self.df", "'subset must be one of (background, evaluation)') self.subset = subset self.df = pd.DataFrame(self.index_subset(self.subset))", "% self.n_classes for i in range(len(self))] }) self.df = self.df.assign(id=self.df.index.values) def __len__(self): return", "for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): if len(files) == 0: continue", "not in ('CUB_Bird', 'DTD_Texture', 'FGVC_Aircraft', 'FGVCx_Fungi'): raise(ValueError, 'target must be one of (CUB_Bird,", "class DummyDataset(Dataset): def __init__(self, samples_per_class=10, n_classes=10, n_features=1): \"\"\"Dummy dataset for debugging/testing purposes A", "i in range(self.num_classes())} # {dict: 20} # {'n01770081': 0, 'n02101006': 1, 'n02108551': 2,", "Returns A list of dicts containing information about all the image files in", "... 1 0 # 2 Angelic.0 Angelic.0.character01 ... 2 0 # 3 Angelic.0", "Angelic.0.character01 ... 0 0 # 1 Angelic.0 Angelic.0.character01 ... 1 0 # 2", "len(dataset) raise(ValueError, f'index exceeds total number of instances, index {index}') def label_mapping(self) ->", "label def __len__(self): return sum([len(dataset) for dataset in self.dataset_list]) def num_classes(self): sum([dataset.num_classes() for", "4 0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 12000} # {0:", "transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def __getitem__(self, item): instance = Image.open(self.datasetid_to_filepath[item])", "first pass to find total for tqdm bar subset_len = 0 for root,", "# ['014.Indigo_Bunting', '042.Vermilion_Flycatcher', '051.Horned_Grebe', ...] 
self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())}", "id class_id {DataFrame: (52720, 6)} # 0 Angelic.0 Angelic.0.character01 ... 0 0 #", "progress_bar.close() return images class MiniImageNet(Dataset): def __init__(self, subset): \"\"\"Dataset class representing miniImageNet dataset", "# Quick first pass to find total for tqdm bar subset_len = 0", "in files if f.endswith('.jpg')]) progress_bar = tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH", "dataset for debugging/testing purposes A sample from the DummyDataset has (n_features + 1)", "len(files) == 0: continue class_name = root.split(os.sep)[-1] # linux / ; windows \\\\", "= sorted(self.df['class_name'].unique()) # [20] # ['n01770081', 'n02101006', 'n02108551', 'n02174001', 'n02219486', 'n02606052', 'n02747177', ...]", "datasetid_to_class_id def __getitem__(self, item): dataset_id, index = self.index_mapping(item) instance, true_label = self.dataset_list[dataset_id][index] #", "__getitem__(self, item): if self.OOD_test: instance = Image.open(self.datasetid_to_filepath[item]) # PNG, 28X28 instance = instance.convert('RGB')", "Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 52720} # {0: '//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png', ...} self.datasetid_to_class_id", "tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)): if len(files) ==", "generate mapping dict from datasetid to global class id. 
:return: datasetid_to_class_id \"\"\" datasetid_to_class_id", "a dataframe to be consistent with other Datasets self.df = pd.DataFrame({ 'class_id': [i", "# debug on MultiDataset evaluation = MultiDataset([Meta('evaluation', 'CUB_Bird'), Meta('evaluation', 'DTD_Texture'), Meta('evaluation', 'FGVC_Aircraft')]) #", "0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0,", "f in files: progress_bar.update(1) images.append({ 'subset': subset, 'class_name': class_name, 'filepath': os.path.join(root, f) })", "1 n01770081 ... 1 0 # 2 n01770081 ... 2 0 # 3", "# {0: '//10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 12000} # {0: 0,", "the class index. # Arguments samples_per_class: Number of samples per class in the", "label def __len__(self): return len(self.df) def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset, target):", "range(self.num_classes())} # {dict: 16} # {'014.Indigo_Bunting': 0, '042.Vermilion_Flycatcher': 1, '051.Horned_Grebe': 2, ...} self.df", "be one of (background, evaluation)') if target not in ('CUB_Bird', 'DTD_Texture', 'FGVC_Aircraft', 'FGVCx_Fungi'):", "will normalize to [0, 1] # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ])", "11: 0, 12: 0, ...} # Setup transforms enable evaluation as OOD dataset", "root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)): if len(files) == 0: continue", "is the label in sub-dataset label = self.datasetid_to_class_id[item] # int return instance, label", "= self.datasetid_to_class_id[item] # int return instance, label def __len__(self): return sum([len(dataset) for dataset", "transforms self.transform = transforms.Compose([ transforms.CenterCrop(224), transforms.Resize(84), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])", "+ class_id_offset, 
dataset.datasetid_to_class_id.values()))) ) index_offset = index_offset + len(dataset) class_id_offset = class_id_offset +", "28] # Reindex to channels first format as supported by pytorch instance =", "of the miniImageNet dataset \"\"\" images = [] print('Indexing {}...'.format(subset)) # Quick first", "in dataset self.df = self.df.assign(id=self.df.index.values) # Convert arbitrary class names of dataset to", "0 # 1 014.Indigo_Bunting ... 1 0 # 2 014.Indigo_Bunting ... 2 0", "self.df = self.df.assign(id=self.df.index.values) # Convert arbitrary class names of dataset to ordered 0-(num_speakers", "class_name = root.split(os.sep)[-1] # linux / ; windows \\\\ # n01770081 for f", "]) def __getitem__(self, item): instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 500x384 instance = self.transform(instance)", "else: instance = io.imread(self.datasetid_to_filepath[item]) # [28, 28] # Reindex to channels first format", "files: progress_bar.update(1) images.append({ 'subset': subset, 'class_name': class_name, 'filepath': os.path.join(root, f) }) # filepath:", "0, 9: 0, 10: 0, ...} # Setup transforms self.transform = transforms.Compose([ transforms.Resize(84),", "0 0 # 1 Angelic.0 Angelic.0.character01 ... 1 0 # 2 Angelic.0 Angelic.0.character01", "Angelic.0.character01 ... 2 0 # 3 Angelic.0 Angelic.0.character01 ... 
3 0 # 4", "feature is the index of the sample in the data and the remaining", "Arguments samples_per_class: Number of samples per class in the dataset n_classes: Number of", "instances, index {index}') def label_mapping(self) -> Dict: \"\"\" generate mapping dict from datasetid", "{}...{}...'.format(target, subset)) folder_name = 'train' if subset == 'background' else 'val' # Quick", "= self.datasetid_to_class_id[item] # from 0 -> 16 return instance, label def __len__(self): return", "= target self.df = pd.DataFrame(self.index_subset(self.subset, self.target)) # Index of dataframe has direct correspondence", "integers self.unique_characters = sorted(self.df['class_name'].unique()) # [2636] # ['Angelic.0.character01', 'Angelic.0.character02', 'Angelic.0.character03', ...] self.class_name_to_id =", "# ToTensor() will normalize to [0, 1] ]) def __getitem__(self, item): instance =", "of (background, evaluation)') if target not in ('CUB_Bird', 'DTD_Texture', 'FGVC_Aircraft', 'FGVCx_Fungi'): raise(ValueError, 'target", "self.n_features = n_features # Create a dataframe to be consistent with other Datasets", "class_id {MiniImageNet: 12000} # 0 n01770081 ... 0 0 # 1 n01770081 ...", "...] 
self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 2636} #", "evaluation set \"\"\" if subset not in ('background', 'evaluation'): raise(ValueError, 'subset must be", "(background, evaluation)') if target not in ('CUB_Bird', 'DTD_Texture', 'FGVC_Aircraft', 'FGVCx_Fungi'): raise(ValueError, 'target must", "- instance.min()) / (instance.max() - instance.min()) label = self.datasetid_to_class_id[item] # from 0 ->", "instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 500x384 instance = self.transform(instance) # [3, 84, 84]", "'subset must be one of (background, evaluation)') if target not in ('CUB_Bird', 'DTD_Texture',", "self.dataset_list: datasetid_to_class_id.update( dict(zip(map(lambda id: id + index_offset, dataset.datasetid_to_class_id.keys()), map(lambda class_id: class_id + class_id_offset,", "+ '/Omniglot_enriched/images_{}'.format(subset)): if len(files) == 0: continue alphabet = root.split(os.sep)[-2] # linux /", "range(self.num_classes())} # {dict: 20} # {'n01770081': 0, 'n02101006': 1, 'n02108551': 2, 'n02174001': 3,", "self.samples_per_class = samples_per_class self.n_classes = n_classes self.n_features = n_features # Create a dataframe", "= self.df.to_dict()['filepath'] # {dict: 12000} # {0: '//10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] #", "which dataset to represent \"\"\" if subset not in ('background', 'evaluation'): raise(ValueError, 'subset", "be one of (background, evaluation)') self.subset = subset self.df = pd.DataFrame(self.index_subset(self.subset)) # Index", "= 0 for dataset in self.dataset_list: datasetid_to_class_id.update( dict(zip(map(lambda id: id + index_offset, dataset.datasetid_to_class_id.keys()),", "progress_bar.update(1) images.append({ 'subset': subset, 'alphabet': alphabet, 'class_name': class_name, 'filepath': os.path.join(root, f) }) #", "f) }) # 
filepath: //10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png progress_bar.close() return images class MiniImageNet(Dataset): def __init__(self, subset):", "progress_bar.update(1) images.append({ 'subset': subset, 'class_name': class_name, 'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg", "# {dict: 12000} # {0: 0, 1: 0, 2: 0, 3: 0, 4:", "tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): if len(files) == 0:", "... 4 0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 12000} #", "'n02174001': 3, 'n02219486': 4, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # class_name filepath", "* self.n_classes def __getitem__(self, item): class_id = item % self.n_classes return np.array([item] +", "8: 0, 9: 0, 10: 0, ...} # Setup transforms self.transform = transforms.Compose([", "Whether the dataset represents the background or evaluation set \"\"\" if subset not", "# {'014.Indigo_Bunting': 0, '042.Vermilion_Flycatcher': 1, '051.Horned_Grebe': 2, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c]))", "and the remaining features are the class index. 
# Arguments samples_per_class: Number of", "subset of the Omniglot dataset dataset \"\"\" images = [] print('Indexing {}...'.format(subset)) #", "self.n_classes return np.array([item] + [class_id]*self.n_features, dtype=np.float), float(class_id) class MultiDataset(Dataset): def __init__(self, dataset_list: List[Dataset]):", "set \"\"\" if subset not in ('background', 'evaluation'): raise(ValueError, 'subset must be one", "typing import List, Dict from config import DATA_PATH class OmniglotDataset(Dataset): def __init__(self, subset,", "class_name = root.split(os.sep)[-1] # linux / ; windows \\\\ # 014.Indigo_Bunting for f", "Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 960} # {0: '//10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg', ...} self.datasetid_to_class_id", "0 # 4 Angelic.0 Angelic.0.character01 ... 4 0 # Create dicts self.datasetid_to_filepath =", "image files in a particular subset of the Omniglot dataset dataset \"\"\" images", "\\\\ # 014.Indigo_Bunting for f in files: progress_bar.update(1) images.append({ 'subset': subset, 'class_name': class_name,", "in self.dataset_list]) def num_classes(self): sum([dataset.num_classes() for dataset in self.dataset_list]) if __name__ == \"__main__\":", "= self.df.to_dict()['class_id'] # {dict: 960} # {0: 0, 1: 0, 2: 0, 3:", "[16] # ['014.Indigo_Bunting', '042.Vermilion_Flycatcher', '051.Horned_Grebe', ...] self.class_name_to_id = {self.unique_characters[i]: i for i in", "= subset self.df = pd.DataFrame(self.index_subset(self.subset)) # Index of dataframe has direct correspondence to", "Angelic.0.character01 ... 1 0 # 2 Angelic.0 Angelic.0.character01 ... 
2 0 # 3", "print('Indexing {}...'.format(subset)) # Quick first pass to find total for tqdm bar subset_len", "dataset # Arguments: subset: Whether the dataset represents the 'background' or 'evaluation' set", "progress_bar.close() return images class DummyDataset(Dataset): def __init__(self, samples_per_class=10, n_classes=10, n_features=1): \"\"\"Dummy dataset for", "of (CUB_Bird, DTD_Texture, FGVC_Aircraft, FGVCx_Fungi)') self.subset = subset self.target = target self.df =", "__getitem__(self, item): dataset_id, index = self.index_mapping(item) instance, true_label = self.dataset_list[dataset_id][index] # true_label is", "Dict: \"\"\" generate mapping dict from datasetid to global class id. :return: datasetid_to_class_id", "5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11:", "'n02101006': 1, 'n02108551': 2, 'n02174001': 3, 'n02219486': 4, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c:", "8: 0, 9: 0, 10: 0, 11: 0, 12: 0, ...} # Setup", "Setup transforms self.transform = transforms.Compose([ transforms.CenterCrop(224), transforms.Resize(84), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,", "['014.Indigo_Bunting', '042.Vermilion_Flycatcher', '051.Horned_Grebe', ...] 
self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} #", "transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ]) def __getitem__(self, item): if self.OOD_test: instance", "import numpy as np import os from typing import List, Dict from config", "instance[np.newaxis, :, :] # [1, 28, 28] # Normalise to 0-1 instance =", "...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # alphabet class_name filepath subset id class_id", "folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)): if len(files) == 0: continue class_name", "miniImageNet dataset \"\"\" images = [] print('Indexing {}...'.format(subset)) # Quick first pass to", "as pd import numpy as np import os from typing import List, Dict", "Image.open(self.datasetid_to_filepath[item]) # PNG, 28X28 instance = instance.convert('RGB') instance = self.transform(instance) # [3, 84,", "i for i in range(self.num_classes())} # {dict: 2636} # {'Angelic.0.character01': 0, 'Angelic.0.character02': 1,", "raise(ValueError, 'subset must be one of (background, evaluation)') if target not in ('CUB_Bird',", "in self.dataset_list]) if __name__ == \"__main__\": # debug on MultiDataset evaluation = MultiDataset([Meta('evaluation',", "self.transform(instance) # [3, 84, 84] label = self.datasetid_to_class_id[item] # from 0 -> 16", "{0: '//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 52720} # {0: 0, 1:", "a particular subset of the Omniglot dataset dataset \"\"\" images = [] print('Indexing", "1 0 # 2 Angelic.0 Angelic.0.character01 ... 
2 0 # 3 Angelic.0 Angelic.0.character01", "class_id_offset + dataset.num_classes() return datasetid_to_class_id def __getitem__(self, item): dataset_id, index = self.index_mapping(item) instance,", "self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 52720} # {0: 0, 1: 0, 2: 0,", "'subset must be one of (background, evaluation)') self.subset = subset self.OOD_test = OOD_test", "= self.df.to_dict()['filepath'] # {dict: 960} # {0: '//10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] #", "format as supported by pytorch instance = instance[np.newaxis, :, :] # [1, 28,", "import Dataset import torch from PIL import Image from torchvision import transforms from", "0, 9: 0, 10: 0, 11: 0, 12: 0, ...} # Setup transforms", "channels first format as supported by pytorch instance = instance[np.newaxis, :, :] #", "'051.Horned_Grebe', ...] self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 16}", "self.target = target self.df = pd.DataFrame(self.index_subset(self.subset, self.target)) # Index of dataframe has direct", "sample should have. \"\"\" self.samples_per_class = samples_per_class self.n_classes = n_classes self.n_features = n_features", "linux / ; windows \\\\ # 014.Indigo_Bunting for f in files: progress_bar.update(1) images.append({", "return torch.from_numpy(instance), label def __len__(self): return len(self.df) def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def", "(instance - instance.min()) / (instance.max() - instance.min()) label = self.datasetid_to_class_id[item] # from 0", "'n02219486', 'n02606052', 'n02747177', ...] 
self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} #", "self.transform = transforms.Compose([ transforms.CenterCrop(224), transforms.Resize(84), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ])", "dataset to ordered 0-(num_speakers - 1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [16] #", "progress_bar = tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): if len(files)", "must be one of (background, evaluation)') self.subset = subset self.OOD_test = OOD_test self.df", "index = index - len(dataset) raise(ValueError, f'index exceeds total number of instances, index", "label_mapping(self) -> Dict: \"\"\" generate mapping dict from datasetid to global class id.", "class_name, 'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg progress_bar.close() return images class Meta(Dataset):", "10: 0, 11: 0, 12: 0, ...} # Setup transforms enable evaluation as", "'evaluation' set \"\"\" if subset not in ('background', 'evaluation'): raise(ValueError, 'subset must be", "= sorted(self.df['class_name'].unique()) # [2636] # ['Angelic.0.character01', 'Angelic.0.character02', 'Angelic.0.character03', ...] self.class_name_to_id = {self.unique_characters[i]: i", "pandas as pd import numpy as np import os from typing import List,", "in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): subset_len += len([f for f in files if f.endswith('.png')])", "= Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 500x384 instance = self.transform(instance) # [3, 84, 84] label", "5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, ...}", "the index in the corresponding dataset. 
:param index: :return: dataset_id, item \"\"\" for", "np.array([item] + [class_id]*self.n_features, dtype=np.float), float(class_id) class MultiDataset(Dataset): def __init__(self, dataset_list: List[Dataset]): \"\"\"Dataset class", "0 # 3 Angelic.0 Angelic.0.character01 ... 3 0 # 4 Angelic.0 Angelic.0.character01 ...", "in range(self.num_classes())} # {dict: 2636} # {'Angelic.0.character01': 0, 'Angelic.0.character02': 1, 'Angelic.0.character03': 2, ...}", "2, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # alphabet class_name filepath subset id", "__init__(self, subset): \"\"\"Dataset class representing miniImageNet dataset # Arguments: subset: Whether the dataset", "[] print('Indexing {}...'.format(subset)) # Quick first pass to find total for tqdm bar", "# Arguments: :param dataset_list: need to first prepare each sub-dataset \"\"\" self.dataset_list =", "0, 11: 0, 12: 0, ...} # Setup transforms enable evaluation as OOD", "\"__main__\": # debug on MultiDataset evaluation = MultiDataset([Meta('evaluation', 'CUB_Bird'), Meta('evaluation', 'DTD_Texture'), Meta('evaluation', 'FGVC_Aircraft')])", "Name of the subset # Returns A list of dicts containing information about", "class_id_offset = 0 for dataset in self.dataset_list: datasetid_to_class_id.update( dict(zip(map(lambda id: id + index_offset,", "'//10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 960} # {0: 0, 1: 0,", "data and the remaining features are the class index. # Arguments samples_per_class: Number", "0-(num_speakers - 1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [20] # ['n01770081', 'n02101006', 'n02108551',", "Angelic.0 Angelic.0.character01 ... 0 0 # 1 Angelic.0 Angelic.0.character01 ... 1 0 #", "4 n01770081 ... 
4 0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict:", "dataset in self.dataset_list]) def num_classes(self): sum([dataset.num_classes() for dataset in self.dataset_list]) if __name__ ==", "all the image files in a particular subset of the Omniglot dataset dataset", "The first feature is the index of the sample in the data and", "are the class index. # Arguments samples_per_class: Number of samples per class in", "mapping method to map index (in __getitem__ method) to the index in the", "os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): if len(files) == 0: continue class_name = root.split(os.sep)[-1] # linux", "raise(ValueError, 'subset must be one of (background, evaluation)') self.subset = subset self.df =", "in the corresponding dataset. :param index: :return: dataset_id, item \"\"\" for dataset_id, dataset", "< len(dataset): return dataset_id, index else: index = index - len(dataset) raise(ValueError, f'index", "self.dataset_list = dataset_list self.datasetid_to_class_id = self.label_mapping() def index_mapping(self, index) -> (int, int): \"\"\"", "filepath subset id class_id {Bird: 960} # 0 014.Indigo_Bunting ... 0 0 #", "self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 12000} # {0: '//10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id']", "to [0, 1] # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ]) def __getitem__(self,", "def __getitem__(self, item): if self.OOD_test: instance = Image.open(self.datasetid_to_filepath[item]) # PNG, 28X28 instance =", "subset self.OOD_test = OOD_test self.df = pd.DataFrame(self.index_subset(self.subset)) # Index of dataframe has direct", "Angelic.0 Angelic.0.character01 ... 
4 0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict:", "= instance.convert('RGB') instance = self.transform(instance) # [3, 84, 84] label = self.datasetid_to_class_id[item] #", "Angelic.0 Angelic.0.character01 ... 1 0 # 2 Angelic.0 Angelic.0.character01 ... 2 0 #", "# 014.Indigo_Bunting for f in files: progress_bar.update(1) images.append({ 'subset': subset, 'class_name': class_name, 'filepath':", "\"\"\" if subset not in ('background', 'evaluation'): raise(ValueError, 'subset must be one of", "label def __len__(self): return len(self.df) def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset): \"\"\"Index", "1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [16] # ['014.Indigo_Bunting', '042.Vermilion_Flycatcher', '051.Horned_Grebe', ...] self.class_name_to_id", "- 1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [16] # ['014.Indigo_Bunting', '042.Vermilion_Flycatcher', '051.Horned_Grebe', ...]", "0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 12000} # {0: '//10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg',", "class_name, 'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png progress_bar.close() return images class MiniImageNet(Dataset):", "particular subset of the Omniglot dataset dataset \"\"\" images = [] print('Indexing {}...'.format(subset))", "def __init__(self, dataset_list: List[Dataset]): \"\"\"Dataset class representing a list of datasets # Arguments:", "for root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): subset_len += len([f for f", "def __len__(self): return len(self.df) def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset, target): 
\"\"\"Index", "index (in __getitem__ method) to the index in the corresponding dataset. :param index:", "}) self.df = self.df.assign(id=self.df.index.values) def __len__(self): return self.samples_per_class * self.n_classes def __getitem__(self, item):", "images = [] print('Indexing {}...'.format(subset)) # Quick first pass to find total for", "Create a dataframe to be consistent with other Datasets self.df = pd.DataFrame({ 'class_id':", "windows \\\\ # Angelic.0 class_name = '{}.{}'.format(alphabet, root.split(os.sep)[-1]) # Angelic.0.character01 for f in", "of (background, evaluation)') self.subset = subset self.OOD_test = OOD_test self.df = pd.DataFrame(self.index_subset(self.subset)) #", "'class_id': [i % self.n_classes for i in range(len(self))] }) self.df = self.df.assign(id=self.df.index.values) def", "in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)): if len(files) == 0: continue class_name = root.split(os.sep)[-1]", "self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 16} # {'014.Indigo_Bunting':", "MultiDataset(Dataset): def __init__(self, dataset_list: List[Dataset]): \"\"\"Dataset class representing a list of datasets #", "normalize to [0, 1] ]) def __getitem__(self, item): instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile,", "from 0 -> 2636 return torch.from_numpy(instance), label def __len__(self): return len(self.df) def num_classes(self):", "# linux / ; windows \\\\ # Angelic.0 class_name = '{}.{}'.format(alphabet, root.split(os.sep)[-1]) #", "0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0,", "'target must be one of (CUB_Bird, DTD_Texture, FGVC_Aircraft, FGVCx_Fungi)') self.subset = subset self.target", "OOD_test=False): \"\"\"Dataset class representing Omniglot dataset # Arguments: subset: Whether the dataset represents", "is the index of the sample in the data and the remaining features", "dicts containing information about all the image files in a particular 
subset of", "'n02606052', 'n02747177', ...] self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict:", "= tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)): if len(files)", "true_label is the label in sub-dataset label = self.datasetid_to_class_id[item] # int return instance,", "= pd.DataFrame({ 'class_id': [i % self.n_classes for i in range(len(self))] }) self.df =", "of the Omniglot dataset dataset \"\"\" images = [] print('Indexing {}...'.format(subset)) # Quick", "{self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 16} # {'014.Indigo_Bunting': 0, '042.Vermilion_Flycatcher':", "def index_mapping(self, index) -> (int, int): \"\"\" A mapping method to map index", "...] self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 16} #", "__getitem__(self, item): class_id = item % self.n_classes return np.array([item] + [class_id]*self.n_features, dtype=np.float), float(class_id)", "in files: progress_bar.update(1) images.append({ 'subset': subset, 'alphabet': alphabet, 'class_name': class_name, 'filepath': os.path.join(root, f)", "{index}') def label_mapping(self) -> Dict: \"\"\" generate mapping dict from datasetid to global", "# from 0 -> 20 return instance, label else: instance = io.imread(self.datasetid_to_filepath[item]) #", "relevant information. 
# Arguments subset: Name of the subset # Returns A list", "# ToTensor() will normalize to [0, 1] # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5,", "\"\"\"Dataset class representing miniImageNet dataset # Arguments: subset: Whether the dataset represents the", "information about all the image files in a particular subset of the Omniglot", "dataset in self.dataset_list]) if __name__ == \"__main__\": # debug on MultiDataset evaluation =", "return instance, label def __len__(self): return sum([len(dataset) for dataset in self.dataset_list]) def num_classes(self):", "28X28 instance = instance.convert('RGB') instance = self.transform(instance) # [3, 84, 84] label =", "class_id {Bird: 960} # 0 014.Indigo_Bunting ... 0 0 # 1 014.Indigo_Bunting ...", "# {dict: 16} # {'014.Indigo_Bunting': 0, '042.Vermilion_Flycatcher': 1, '051.Horned_Grebe': 2, ...} self.df =", "# from 0 -> 16 return instance, label def __len__(self): return len(self.df) def", "return instance, label else: instance = io.imread(self.datasetid_to_filepath[item]) # [28, 28] # Reindex to", "item): instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 84x84 instance = self.transform(instance) # [3, 84,", "folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): if len(files) == 0: continue class_name =", "sample in the data and the remaining features are the class index. 
#", "= item % self.n_classes return np.array([item] + [class_id]*self.n_features, dtype=np.float), float(class_id) class MultiDataset(Dataset): def", "instance, label def __len__(self): return len(self.df) def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset):", "dataset to represent \"\"\" if subset not in ('background', 'evaluation'): raise(ValueError, 'subset must", "class_id = item % self.n_classes return np.array([item] + [class_id]*self.n_features, dtype=np.float), float(class_id) class MultiDataset(Dataset):", "f in files if f.endswith('.jpg')]) progress_bar = tqdm(total=subset_len) for root, folders, files in", "dataset.datasetid_to_class_id.keys()), map(lambda class_id: class_id + class_id_offset, dataset.datasetid_to_class_id.values()))) ) index_offset = index_offset + len(dataset)", "subset not in ('background', 'evaluation'): raise(ValueError, 'subset must be one of (background, evaluation)')", "from 0 -> 20 return instance, label else: instance = io.imread(self.datasetid_to_filepath[item]) # [28,", "subset: Whether the dataset represents the background or evaluation set \"\"\" if subset", "subset, OOD_test=False): \"\"\"Dataset class representing Omniglot dataset # Arguments: subset: Whether the dataset", "{'n01770081': 0, 'n02101006': 1, 'n02108551': 2, 'n02174001': 3, 'n02219486': 4, ...} self.df =", "Angelic.0.character01 for f in files: progress_bar.update(1) images.append({ 'subset': subset, 'alphabet': alphabet, 'class_name': class_name,", "dict from datasetid to global class id. 
:return: datasetid_to_class_id \"\"\" datasetid_to_class_id = dict()", "for f in files: progress_bar.update(1) images.append({ 'subset': subset, 'class_name': class_name, 'filepath': os.path.join(root, f)", "__len__(self): return len(self.df) def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset, target): \"\"\"Index a", "= class_id_offset + dataset.num_classes() return datasetid_to_class_id def __getitem__(self, item): dataset_id, index = self.index_mapping(item)", "instance.min()) label = self.datasetid_to_class_id[item] # from 0 -> 2636 return torch.from_numpy(instance), label def", "dataframe to be consistent with other Datasets self.df = pd.DataFrame({ 'class_id': [i %", "dataset represents the background or evaluation set target: which dataset to represent \"\"\"", "2636 return torch.from_numpy(instance), label def __len__(self): return len(self.df) def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod", "dataset to ordered 0-(num_speakers - 1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [2636] #", "index of the sample in the data and the remaining features are the", "0, 12: 0, ...} # Setup transforms enable evaluation as OOD dataset self.transform", "instance = io.imread(self.datasetid_to_filepath[item]) # [28, 28] # Reindex to channels first format as", "'n02108551': 2, 'n02174001': 3, 'n02219486': 4, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) #", "0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0,", "\"\"\" generate mapping dict from datasetid to global class id. :return: datasetid_to_class_id \"\"\"", "item): instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 500x384 instance = self.transform(instance) # [3, 84,", "# [16] # ['014.Indigo_Bunting', '042.Vermilion_Flycatcher', '051.Horned_Grebe', ...] 
self.class_name_to_id = {self.unique_characters[i]: i for i", "__init__(self, samples_per_class=10, n_classes=10, n_features=1): \"\"\"Dummy dataset for debugging/testing purposes A sample from the", "__len__(self): return len(self.df) def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset): \"\"\"Index a subset", "6: 0, 7: 0, 8: 0, 9: 0, 10: 0, ...} # Setup", "[2636] # ['Angelic.0.character01', 'Angelic.0.character02', 'Angelic.0.character03', ...] self.class_name_to_id = {self.unique_characters[i]: i for i in", "self.datasetid_to_class_id[item] # from 0 -> 16 return instance, label def __len__(self): return len(self.df)", "0, 'n02101006': 1, 'n02108551': 2, 'n02174001': 3, 'n02219486': 4, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda", "representing a list of datasets # Arguments: :param dataset_list: need to first prepare", "by pytorch instance = instance[np.newaxis, :, :] # [1, 28, 28] # Normalise", "import List, Dict from config import DATA_PATH class OmniglotDataset(Dataset): def __init__(self, subset, OOD_test=False):", "'Angelic.0.character02', 'Angelic.0.character03', ...] self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict:", "{self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 20} # {'n01770081': 0, 'n02101006':", "== 'background' else 'val' # Quick first pass to find total for tqdm", "Setup transforms enable evaluation as OOD dataset self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), #", "to global class id. :return: datasetid_to_class_id \"\"\" datasetid_to_class_id = dict() index_offset = 0", "label in sub-dataset label = self.datasetid_to_class_id[item] # int return instance, label def __len__(self):", "... 2 0 # 3 Angelic.0 Angelic.0.character01 ... 
class OmniglotDataset(Dataset):
    def __init__(self, subset, OOD_test=False):
        """Dataset class representing the Omniglot dataset.

        # Arguments:
            subset: Whether the dataset represents the 'background' or 'evaluation' set
            OOD_test: When True, __getitem__ loads images via PIL, converts to RGB and
                applies the 84x84 transform so Omniglot can serve as an
                out-of-distribution test set; otherwise raw [1, 28, 28] tensors are returned.
        """
        if subset not in ('background', 'evaluation'):
            # BUGFIX: the original `raise(ValueError, 'msg')` raises a TypeError in
            # Python 3 (exceptions must derive from BaseException); construct the
            # exception properly instead.
            raise ValueError('subset must be one of (background, evaluation)')
        self.subset = subset
        self.OOD_test = OOD_test

        self.df = pd.DataFrame(self.index_subset(self.subset))

        # Index of dataframe has direct correspondence to item in dataset
        self.df = self.df.assign(id=self.df.index.values)

        # Convert arbitrary class names of dataset to ordered 0-(num_classes - 1) integers
        self.unique_characters = sorted(self.df['class_name'].unique())
        self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())}
        self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c]))

        # Create dicts mapping item id -> filepath and item id -> global class id
        self.datasetid_to_filepath = self.df.to_dict()['filepath']
        self.datasetid_to_class_id = self.df.to_dict()['class_id']

        # Setup transforms; enables evaluation as an OOD dataset
        self.transform = transforms.Compose([
            transforms.Resize(84),
            transforms.ToTensor(),  # ToTensor() will normalize to [0, 1]
            # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        ])

    def __getitem__(self, item):
        if self.OOD_test:
            instance = Image.open(self.datasetid_to_filepath[item])  # PNG, 28x28
            instance = instance.convert('RGB')
            instance = self.transform(instance)  # [3, 84, 84]
            label = self.datasetid_to_class_id[item]
            return instance, label
        else:
            instance = io.imread(self.datasetid_to_filepath[item])  # [28, 28]
            # Reindex to channels-first format as expected by pytorch
            instance = instance[np.newaxis, :, :]
            # Normalise to 0-1
            instance = (instance - instance.min()) / (instance.max() - instance.min())
            label = self.datasetid_to_class_id[item]  # from 0 -> 2636
            return torch.from_numpy(instance), label

    def __len__(self):
        return len(self.df)

    def num_classes(self):
        return len(self.df['class_name'].unique())

    @staticmethod
    def index_subset(subset):
        """Index a subset by looping through all of its files and recording relevant information.

        # Arguments
            subset: Name of the subset

        # Returns
            A list of dicts containing information about all the image files in a
            particular subset of the Omniglot dataset
        """
        images = []
        print('Indexing {}...'.format(subset))
        # Quick first pass to find total for tqdm bar
        subset_len = 0
        for root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)):
            subset_len += len([f for f in files if f.endswith('.png')])

        progress_bar = tqdm(total=subset_len)
        for root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)):
            if len(files) == 0:
                continue

            # Directory layout is .../images_<subset>/<alphabet>/<character>/<file>.png
            alphabet = root.split(os.sep)[-2]
            class_name = '{}.{}'.format(alphabet, root.split(os.sep)[-1])

            for f in files:
                progress_bar.update(1)
                images.append({
                    'subset': subset,
                    'alphabet': alphabet,
                    'class_name': class_name,
                    'filepath': os.path.join(root, f)
                })

        progress_bar.close()
        return images
class MiniImageNet(Dataset):
    def __init__(self, subset):
        """Dataset class representing the miniImageNet dataset.

        # Arguments:
            subset: Whether the dataset represents the background or evaluation set
        """
        if subset not in ('background', 'evaluation'):
            # BUGFIX: the original `raise(ValueError, 'msg')` raises a TypeError in
            # Python 3 (exceptions must derive from BaseException); construct the
            # exception properly instead.
            raise ValueError('subset must be one of (background, evaluation)')
        self.subset = subset

        self.df = pd.DataFrame(self.index_subset(self.subset))

        # Index of dataframe has direct correspondence to item in dataset
        self.df = self.df.assign(id=self.df.index.values)

        # Convert arbitrary class names of dataset to ordered 0-(num_classes - 1) integers
        self.unique_characters = sorted(self.df['class_name'].unique())
        self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())}
        self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c]))

        # Create dicts mapping item id -> filepath and item id -> global class id
        self.datasetid_to_filepath = self.df.to_dict()['filepath']
        self.datasetid_to_class_id = self.df.to_dict()['class_id']

        # Setup transforms
        self.transform = transforms.Compose([
            transforms.Resize(84),
            transforms.ToTensor(),  # ToTensor() will normalize to [0, 1]
        ])

    def __getitem__(self, item):
        instance = Image.open(self.datasetid_to_filepath[item])  # JpegImageFile, 84x84
        instance = self.transform(instance)  # [3, 84, 84]
        label = self.datasetid_to_class_id[item]  # from 0 -> 20
        return instance, label

    def __len__(self):
        return len(self.df)

    def num_classes(self):
        return len(self.df['class_name'].unique())

    @staticmethod
    def index_subset(subset):
        """Index a subset by looping through all of its files and recording relevant information.

        # Arguments
            subset: Name of the subset

        # Returns
            A list of dicts containing information about all the image files in a
            particular subset of the miniImageNet dataset
        """
        images = []
        print('Indexing {}...'.format(subset))
        # Quick first pass to find total for tqdm bar
        subset_len = 0
        for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)):
            subset_len += len([f for f in files if f.endswith('.jpg')])

        progress_bar = tqdm(total=subset_len)
        for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)):
            if len(files) == 0:
                continue

            # Directory layout is .../images_<subset>/<wordnet id>/<file>.jpg
            class_name = root.split(os.sep)[-1]

            for f in files:
                progress_bar.update(1)
                images.append({
                    'subset': subset,
                    'class_name': class_name,
                    'filepath': os.path.join(root, f)
                })

        progress_bar.close()
        return images
class Meta(Dataset):
    def __init__(self, subset, target):
        """Dataset class representing CUB_Bird/DTD_Texture/FGVC_Aircraft/FGVCx_Fungi dataset.

        # Arguments:
            subset: Whether the dataset represents the background or evaluation set
            target: which dataset to represent
        """
        if subset not in ('background', 'evaluation'):
            # BUGFIX: the original `raise(ValueError, 'msg')` raises a TypeError in
            # Python 3 (exceptions must derive from BaseException); construct the
            # exception properly instead.
            raise ValueError('subset must be one of (background, evaluation)')
        if target not in ('CUB_Bird', 'DTD_Texture', 'FGVC_Aircraft', 'FGVCx_Fungi'):
            # BUGFIX: same broken raise form as above.
            raise ValueError('target must be one of (CUB_Bird, DTD_Texture, FGVC_Aircraft, FGVCx_Fungi)')
        self.subset = subset
        self.target = target

        self.df = pd.DataFrame(self.index_subset(self.subset, self.target))

        # Index of dataframe has direct correspondence to item in dataset
        self.df = self.df.assign(id=self.df.index.values)

        # Convert arbitrary class names of dataset to ordered 0-(num_classes - 1) integers
        self.unique_characters = sorted(self.df['class_name'].unique())
        self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())}
        self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c]))

        # Create dicts mapping item id -> filepath and item id -> global class id
        self.datasetid_to_filepath = self.df.to_dict()['filepath']
        self.datasetid_to_class_id = self.df.to_dict()['class_id']

        # Setup transforms
        self.transform = transforms.Compose([
            transforms.CenterCrop(224),
            transforms.Resize(84),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

    def __getitem__(self, item):
        instance = Image.open(self.datasetid_to_filepath[item])  # JpegImageFile, 500x384
        instance = self.transform(instance)  # [3, 84, 84]
        label = self.datasetid_to_class_id[item]  # from 0 -> 16
        return instance, label

    def __len__(self):
        return len(self.df)

    def num_classes(self):
        return len(self.df['class_name'].unique())

    @staticmethod
    def index_subset(subset, target):
        """Index a subset by looping through all of its files and recording relevant information.

        # Arguments
            subset: Name of the subset
            target: Name of the meta-dataset sub-collection (e.g. CUB_Bird)

        # Returns
            A list of dicts containing information about all the image files in a
            particular subset of the requested meta-dataset collection
        """
        images = []
        print('Indexing {}...{}...'.format(target, subset))
        folder_name = 'train' if subset == 'background' else 'val'
        # Quick first pass to find total for tqdm bar
        subset_len = 0
        for root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)):
            subset_len += len([f for f in files if f.endswith('.jpg')])

        progress_bar = tqdm(total=subset_len)
        for root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)):
            if len(files) == 0:
                continue

            # Directory layout is .../<target>/<train|val>/<class>/<file>.jpg
            class_name = root.split(os.sep)[-1]

            for f in files:
                progress_bar.update(1)
                images.append({
                    'subset': subset,
                    'class_name': class_name,
                    'filepath': os.path.join(root, f)
                })

        progress_bar.close()
        return images
total for tqdm bar subset_len = 0 for root, folders, files in", "'Angelic.0.character03', ...] self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 2636}", "in ('CUB_Bird', 'DTD_Texture', 'FGVC_Aircraft', 'FGVCx_Fungi'): raise(ValueError, 'target must be one of (CUB_Bird, DTD_Texture,", "class representing a list of datasets # Arguments: :param dataset_list: need to first", "'train' if subset == 'background' else 'val' # Quick first pass to find", "\"\"\" A mapping method to map index (in __getitem__ method) to the index", "transforms.CenterCrop(224), transforms.Resize(84), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def __getitem__(self, item):", "= pd.DataFrame(self.index_subset(self.subset, self.target)) # Index of dataframe has direct correspondence to item in", "self.df.assign(id=self.df.index.values) # Convert arbitrary class names of dataset to ordered 0-(num_speakers - 1)", "len(self.df['class_name'].unique()) @staticmethod def index_subset(subset): \"\"\"Index a subset by looping through all of its", "0-(num_speakers - 1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [2636] # ['Angelic.0.character01', 'Angelic.0.character02', 'Angelic.0.character03',", "instance = (instance - instance.min()) / (instance.max() - instance.min()) label = self.datasetid_to_class_id[item] #", "integers self.unique_characters = sorted(self.df['class_name'].unique()) # [16] # ['014.Indigo_Bunting', '042.Vermilion_Flycatcher', '051.Horned_Grebe', ...] self.class_name_to_id =", "# 1 014.Indigo_Bunting ... 1 0 # 2 014.Indigo_Bunting ... 
2 0 #", "exceeds total number of instances, index {index}') def label_mapping(self) -> Dict: \"\"\" generate", "transforms.Resize(84), transforms.ToTensor(), # ToTensor() will normalize to [0, 1] # transforms.Normalize(mean=[0.5, 0.5, 0.5],", "one of (CUB_Bird, DTD_Texture, FGVC_Aircraft, FGVCx_Fungi)') self.subset = subset self.target = target self.df", "; windows \\\\ # Angelic.0 class_name = '{}.{}'.format(alphabet, root.split(os.sep)[-1]) # Angelic.0.character01 for f", "{0: '//10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 960} # {0: 0, 1:", "0, 'Angelic.0.character02': 1, 'Angelic.0.character03': 2, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # alphabet", "particular subset of the miniImageNet dataset \"\"\" images = [] print('Indexing {}...{}...'.format(target, subset))", "[0, 1] # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ]) def __getitem__(self, item):", "background or evaluation set \"\"\" if subset not in ('background', 'evaluation'): raise(ValueError, 'subset", "in sub-dataset label = self.datasetid_to_class_id[item] # int return instance, label def __len__(self): return", "from torch.utils.data import Dataset import torch from PIL import Image from torchvision import", "sorted(self.df['class_name'].unique()) # [20] # ['n01770081', 'n02101006', 'n02108551', 'n02174001', 'n02219486', 'n02606052', 'n02747177', ...] self.class_name_to_id", "...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # class_name filepath subset id class_id {Bird:", "np import os from typing import List, Dict from config import DATA_PATH class", "class in the dataset n_classes: Number of distinct classes in the dataset n_features:", "2 Angelic.0 Angelic.0.character01 ... 2 0 # 3 Angelic.0 Angelic.0.character01 ... 
3 0", "dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 960} # {0: '//10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg', ...} self.datasetid_to_class_id =", "Angelic.0 class_name = '{}.{}'.format(alphabet, root.split(os.sep)[-1]) # Angelic.0.character01 for f in files: progress_bar.update(1) images.append({", "3 0 # 4 014.Indigo_Bunting ... 4 0 # Create dicts self.datasetid_to_filepath =", "item \"\"\" for dataset_id, dataset in enumerate(self.dataset_list): if index < len(dataset): return dataset_id,", "0 # 2 014.Indigo_Bunting ... 2 0 # 3 014.Indigo_Bunting ... 3 0", "dataset n_features: Number of extra features each sample should have. \"\"\" self.samples_per_class =", "transforms.Resize(84), transforms.ToTensor(), # ToTensor() will normalize to [0, 1] ]) def __getitem__(self, item):", "label else: instance = io.imread(self.datasetid_to_filepath[item]) # [28, 28] # Reindex to channels first", "+ '/meta-dataset/{}/{}'.format(target, folder_name)): subset_len += len([f for f in files if f.endswith('.jpg')]) progress_bar", "std=[0.5, 0.5, 0.5]) ]) def __getitem__(self, item): if self.OOD_test: instance = Image.open(self.datasetid_to_filepath[item]) #", "1 014.Indigo_Bunting ... 1 0 # 2 014.Indigo_Bunting ... 2 0 # 3", "Convert arbitrary class names of dataset to ordered 0-(num_speakers - 1) integers self.unique_characters", "A mapping method to map index (in __getitem__ method) to the index in", "# Normalise to 0-1 instance = (instance - instance.min()) / (instance.max() - instance.min())", "os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): subset_len += len([f for f in files if f.endswith('.png')]) progress_bar", "'/miniImageNet/images_{}'.format(subset)): subset_len += len([f for f in files if f.endswith('.jpg')]) progress_bar = tqdm(total=subset_len)", "class_name filepath subset id class_id {MiniImageNet: 12000} # 0 n01770081 ... 
class MultiDataset(Dataset):
    def __init__(self, dataset_list: List[Dataset]):
        """Dataset class representing a list of datasets.

        # Arguments:
            :param dataset_list: need to first prepare each sub-dataset
        """
        self.dataset_list = dataset_list
        # Global item id -> global class id, built once up front.
        self.datasetid_to_class_id = self.label_mapping()

    def index_mapping(self, index) -> (int, int):
        """A mapping method to map index (in __getitem__ method) to the index in the
        corresponding dataset.

        :param index: global item index across all sub-datasets
        :return: dataset_id, item
        """
        for dataset_id, dataset in enumerate(self.dataset_list):
            if index < len(dataset):
                return dataset_id, index
            index = index - len(dataset)
        # BUGFIX: the original `raise(ValueError, f'...')` raises a TypeError in
        # Python 3; construct the exception properly instead.
        raise ValueError(f'index exceeds total number of instances, index {index}')

    def label_mapping(self) -> Dict:
        """generate mapping dict from datasetid to global class id.

        :return: datasetid_to_class_id
        """
        datasetid_to_class_id = dict()
        index_offset = 0
        class_id_offset = 0
        for dataset in self.dataset_list:
            # Shift each sub-dataset's item ids and class ids into the global range.
            datasetid_to_class_id.update(
                {item_id + index_offset: class_id + class_id_offset
                 for item_id, class_id in dataset.datasetid_to_class_id.items()})
            index_offset = index_offset + len(dataset)
            class_id_offset = class_id_offset + dataset.num_classes()
        return datasetid_to_class_id

    def __getitem__(self, item):
        dataset_id, index = self.index_mapping(item)
        instance, true_label = self.dataset_list[dataset_id][index]  # true_label in sub-dataset
        label = self.datasetid_to_class_id[item]  # globally remapped class id
        return instance, label

    def __len__(self):
        return sum([len(dataset) for dataset in self.dataset_list])

    def num_classes(self):
        # BUGFIX: the original was missing `return` and therefore always yielded None.
        return sum([dataset.num_classes() for dataset in self.dataset_list])
# Arguments subset: Name of", "{'014.Indigo_Bunting': 0, '042.Vermilion_Flycatcher': 1, '051.Horned_Grebe': 2, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) #", "10: 0, ...} # Setup transforms self.transform = transforms.Compose([ transforms.CenterCrop(224), transforms.Resize(84), transforms.ToTensor(), transforms.Normalize(mean=[0.485,", "{dict: 2636} # {'Angelic.0.character01': 0, 'Angelic.0.character02': 1, 'Angelic.0.character03': 2, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda", "c: self.class_name_to_id[c])) # class_name filepath subset id class_id {MiniImageNet: 12000} # 0 n01770081", "Omniglot dataset # Arguments: subset: Whether the dataset represents the 'background' or 'evaluation'", "must be one of (CUB_Bird, DTD_Texture, FGVC_Aircraft, FGVCx_Fungi)') self.subset = subset self.target =", "the dataset represents the background or evaluation set target: which dataset to represent", "2 014.Indigo_Bunting ... 2 0 # 3 014.Indigo_Bunting ... 
3 0 # 4", "first prepare each sub-dataset \"\"\" self.dataset_list = dataset_list self.datasetid_to_class_id = self.label_mapping() def index_mapping(self,", "instance = instance.convert('RGB') instance = self.transform(instance) # [3, 84, 84] label = self.datasetid_to_class_id[item]", "pd import numpy as np import os from typing import List, Dict from", "not in ('background', 'evaluation'): raise(ValueError, 'subset must be one of (background, evaluation)') if", "files: progress_bar.update(1) images.append({ 'subset': subset, 'alphabet': alphabet, 'class_name': class_name, 'filepath': os.path.join(root, f) })", "progress_bar.close() return images class Meta(Dataset): def __init__(self, subset, target): \"\"\"Dataset class representing CUB_Bird/DTD_Texture/FGVC_Aircraft/FGVCx_Fungi", "root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): subset_len += len([f for f in", "# linux / ; windows \\\\ # 014.Indigo_Bunting for f in files: progress_bar.update(1)", "Dict from config import DATA_PATH class OmniglotDataset(Dataset): def __init__(self, subset, OOD_test=False): \"\"\"Dataset class", "(background, evaluation)') self.subset = subset self.OOD_test = OOD_test self.df = pd.DataFrame(self.index_subset(self.subset)) # Index", "num_classes(self): sum([dataset.num_classes() for dataset in self.dataset_list]) if __name__ == \"__main__\": # debug on", "# alphabet class_name filepath subset id class_id {DataFrame: (52720, 6)} # 0 Angelic.0", "evaluation)') if target not in ('CUB_Bird', 'DTD_Texture', 'FGVC_Aircraft', 'FGVCx_Fungi'): raise(ValueError, 'target must be", "subset, target): \"\"\"Dataset class representing CUB_Bird/DTD_Texture/FGVC_Aircraft/FGVCx_Fungi dataset # Arguments: subset: Whether the dataset", "[0, 1] ]) def __getitem__(self, item): instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 84x84 instance", "raise(ValueError, 'target must be one of (CUB_Bird, DTD_Texture, FGVC_Aircraft, FGVCx_Fungi)') 
self.subset = subset", "def __getitem__(self, item): dataset_id, index = self.index_mapping(item) instance, true_label = self.dataset_list[dataset_id][index] # true_label", "0 # 1 Angelic.0 Angelic.0.character01 ... 1 0 # 2 Angelic.0 Angelic.0.character01 ...", "# Arguments samples_per_class: Number of samples per class in the dataset n_classes: Number", "class_name = '{}.{}'.format(alphabet, root.split(os.sep)[-1]) # Angelic.0.character01 for f in files: progress_bar.update(1) images.append({ 'subset':", "if subset not in ('background', 'evaluation'): raise(ValueError, 'subset must be one of (background,", "item in dataset self.df = self.df.assign(id=self.df.index.values) # Convert arbitrary class names of dataset", "'n02108551', 'n02174001', 'n02219486', 'n02606052', 'n02747177', ...] self.class_name_to_id = {self.unique_characters[i]: i for i in", "sub-dataset label = self.datasetid_to_class_id[item] # int return instance, label def __len__(self): return sum([len(dataset)", "# 2 n01770081 ... 2 0 # 3 n01770081 ... 
3 0 #", "c: self.class_name_to_id[c])) # alphabet class_name filepath subset id class_id {DataFrame: (52720, 6)} #", "class representing Omniglot dataset # Arguments: subset: Whether the dataset represents the 'background'", "subset # Returns A list of dicts containing information about all the image", "# {0: '//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 52720} # {0: 0,", "from config import DATA_PATH class OmniglotDataset(Dataset): def __init__(self, subset, OOD_test=False): \"\"\"Dataset class representing", "= 0 for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): subset_len += len([f", "filepath: //10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg progress_bar.close() return images class Meta(Dataset): def __init__(self, subset, target): \"\"\"Dataset class", "OmniglotDataset(Dataset): def __init__(self, subset, OOD_test=False): \"\"\"Dataset class representing Omniglot dataset # Arguments: subset:", "= root.split(os.sep)[-1] # linux / ; windows \\\\ # 014.Indigo_Bunting for f in", "2 0 # 3 Angelic.0 Angelic.0.character01 ... 
3 0 # 4 Angelic.0 Angelic.0.character01", "def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset, target): \"\"\"Index a subset by looping", "...} # Setup transforms self.transform = transforms.Compose([ transforms.CenterCrop(224), transforms.Resize(84), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],", "target not in ('CUB_Bird', 'DTD_Texture', 'FGVC_Aircraft', 'FGVCx_Fungi'): raise(ValueError, 'target must be one of", "class_id_offset = class_id_offset + dataset.num_classes() return datasetid_to_class_id def __getitem__(self, item): dataset_id, index =", "n_classes: Number of distinct classes in the dataset n_features: Number of extra features", "self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 20} # {'n01770081':", "0 -> 20 return instance, label else: instance = io.imread(self.datasetid_to_filepath[item]) # [28, 28]", "[3, 84, 84] label = self.datasetid_to_class_id[item] # from 0 -> 20 return instance,", "of dataset to ordered 0-(num_speakers - 1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [2636]", "index) -> (int, int): \"\"\" A mapping method to map index (in __getitem__", "i in range(self.num_classes())} # {dict: 16} # {'014.Indigo_Bunting': 0, '042.Vermilion_Flycatcher': 1, '051.Horned_Grebe': 2,", "or evaluation set target: which dataset to represent \"\"\" if subset not in", "one of (background, evaluation)') self.subset = subset self.df = pd.DataFrame(self.index_subset(self.subset)) # Index of", "'subset': subset, 'class_name': class_name, 'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg progress_bar.close() return", "instance = Image.open(self.datasetid_to_filepath[item]) # PNG, 28X28 instance = instance.convert('RGB') instance = self.transform(instance) #", "files if f.endswith('.png')]) progress_bar = 
tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH +", "samples_per_class: Number of samples per class in the dataset n_classes: Number of distinct", "root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): if len(files) == 0: continue class_name", "has (n_features + 1) features. The first feature is the index of the", "4 Angelic.0 Angelic.0.character01 ... 4 0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] #", "= self.transform(instance) # [3, 84, 84] label = self.datasetid_to_class_id[item] # from 0 ->", "<gh_stars>0 from torch.utils.data import Dataset import torch from PIL import Image from torchvision", "Number of extra features each sample should have. \"\"\" self.samples_per_class = samples_per_class self.n_classes", "(n_features + 1) features. The first feature is the index of the sample", "datasetid_to_class_id.update( dict(zip(map(lambda id: id + index_offset, dataset.datasetid_to_class_id.keys()), map(lambda class_id: class_id + class_id_offset, dataset.datasetid_to_class_id.values())))", "6)} # 0 Angelic.0 Angelic.0.character01 ... 0 0 # 1 Angelic.0 Angelic.0.character01 ...", "{}...'.format(subset)) # Quick first pass to find total for tqdm bar subset_len =", "c: self.class_name_to_id[c])) # class_name filepath subset id class_id {Bird: 960} # 0 014.Indigo_Bunting", "list of datasets # Arguments: :param dataset_list: need to first prepare each sub-dataset", "# linux / ; windows \\\\ # n01770081 for f in files: progress_bar.update(1)", "dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 52720} # {0: '//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png', ...} self.datasetid_to_class_id =", "1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [20] # ['n01770081', 'n02101006', 'n02108551', 'n02174001', 'n02219486',", "features each sample should have. 
\"\"\" self.samples_per_class = samples_per_class self.n_classes = n_classes self.n_features", "# Angelic.0.character01 for f in files: progress_bar.update(1) images.append({ 'subset': subset, 'alphabet': alphabet, 'class_name':", "self.class_name_to_id[c])) # class_name filepath subset id class_id {MiniImageNet: 12000} # 0 n01770081 ...", "0 # 2 n01770081 ... 2 0 # 3 n01770081 ... 3 0", "-> 2636 return torch.from_numpy(instance), label def __len__(self): return len(self.df) def num_classes(self): return len(self.df['class_name'].unique())", "}) # filepath: //10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg progress_bar.close() return images class DummyDataset(Dataset): def __init__(self, samples_per_class=10, n_classes=10,", "index else: index = index - len(dataset) raise(ValueError, f'index exceeds total number of", "pd.DataFrame(self.index_subset(self.subset, self.target)) # Index of dataframe has direct correspondence to item in dataset", "Angelic.0 Angelic.0.character01 ... 3 0 # 4 Angelic.0 Angelic.0.character01 ... 
4 0 #", "9: 0, 10: 0, ...} # Setup transforms self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(),", "target): \"\"\"Index a subset by looping through all of its files and recording", "the background or evaluation set target: which dataset to represent \"\"\" if subset", "self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # alphabet class_name filepath subset id class_id {DataFrame:", "...} # Setup transforms self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), # ToTensor() will normalize", "'class_name': class_name, 'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png progress_bar.close() return images class", "= dataset_list self.datasetid_to_class_id = self.label_mapping() def index_mapping(self, index) -> (int, int): \"\"\" A", "continue class_name = root.split(os.sep)[-1] # linux / ; windows \\\\ # n01770081 for", "-> Dict: \"\"\" generate mapping dict from datasetid to global class id. :return:", "self.unique_characters = sorted(self.df['class_name'].unique()) # [20] # ['n01770081', 'n02101006', 'n02108551', 'n02174001', 'n02219486', 'n02606052', 'n02747177',", "in files if f.endswith('.png')]) progress_bar = tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH", "self.datasetid_to_class_id[item] # from 0 -> 20 return instance, label else: instance = io.imread(self.datasetid_to_filepath[item])", "i in range(self.num_classes())} # {dict: 2636} # {'Angelic.0.character01': 0, 'Angelic.0.character02': 1, 'Angelic.0.character03': 2,", "in a particular subset of the Omniglot dataset dataset \"\"\" images = []", "self.unique_characters = sorted(self.df['class_name'].unique()) # [2636] # ['Angelic.0.character01', 'Angelic.0.character02', 'Angelic.0.character03', ...] 
self.class_name_to_id = {self.unique_characters[i]:", "integers self.unique_characters = sorted(self.df['class_name'].unique()) # [20] # ['n01770081', 'n02101006', 'n02108551', 'n02174001', 'n02219486', 'n02606052',", "bar subset_len = 0 for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): subset_len", "//10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg progress_bar.close() return images class DummyDataset(Dataset): def __init__(self, samples_per_class=10, n_classes=10, n_features=1): \"\"\"Dummy dataset", "subset_len = 0 for root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): subset_len +=", "class id. :return: datasetid_to_class_id \"\"\" datasetid_to_class_id = dict() index_offset = 0 class_id_offset =", "# Arguments subset: Name of the subset # Returns A list of dicts", "= self.dataset_list[dataset_id][index] # true_label is the label in sub-dataset label = self.datasetid_to_class_id[item] #", "id class_id {MiniImageNet: 12000} # 0 n01770081 ... 0 0 # 1 n01770081", "# {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0,", "[i % self.n_classes for i in range(len(self))] }) self.df = self.df.assign(id=self.df.index.values) def __len__(self):", "the dataset n_features: Number of extra features each sample should have. \"\"\" self.samples_per_class", "Angelic.0.character01 ... 3 0 # 4 Angelic.0 Angelic.0.character01 ... 
4 0 # Create", "0 for root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): subset_len += len([f for", "Omniglot dataset dataset \"\"\" images = [] print('Indexing {}...'.format(subset)) # Quick first pass", "windows \\\\ # n01770081 for f in files: progress_bar.update(1) images.append({ 'subset': subset, 'class_name':", "'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg progress_bar.close() return images class Meta(Dataset): def", "import transforms from skimage import io from tqdm import tqdm import pandas as", "subset, 'class_name': class_name, 'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg progress_bar.close() return images", "must be one of (background, evaluation)') self.subset = subset self.df = pd.DataFrame(self.index_subset(self.subset)) #", "instance, label def __len__(self): return len(self.df) def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset,", "__getitem__ method) to the index in the corresponding dataset. :param index: :return: dataset_id,", "dataset represents the background or evaluation set \"\"\" if subset not in ('background',", "sum([dataset.num_classes() for dataset in self.dataset_list]) if __name__ == \"__main__\": # debug on MultiDataset", "subset: Whether the dataset represents the background or evaluation set target: which dataset", "Image from torchvision import transforms from skimage import io from tqdm import tqdm", "index_mapping(self, index) -> (int, int): \"\"\" A mapping method to map index (in", "= '{}.{}'.format(alphabet, root.split(os.sep)[-1]) # Angelic.0.character01 for f in files: progress_bar.update(1) images.append({ 'subset': subset,", "and recording relevant information. 
# Arguments subset: Name of the subset # Returns", "representing CUB_Bird/DTD_Texture/FGVC_Aircraft/FGVCx_Fungi dataset # Arguments: subset: Whether the dataset represents the background or", "class representing miniImageNet dataset # Arguments: subset: Whether the dataset represents the background", "self.df.assign(id=self.df.index.values) def __len__(self): return self.samples_per_class * self.n_classes def __getitem__(self, item): class_id = item", "# Returns A list of dicts containing information about all the image files", "raise(ValueError, f'index exceeds total number of instances, index {index}') def label_mapping(self) -> Dict:", "prepare each sub-dataset \"\"\" self.dataset_list = dataset_list self.datasetid_to_class_id = self.label_mapping() def index_mapping(self, index)", "self.dataset_list]) if __name__ == \"__main__\": # debug on MultiDataset evaluation = MultiDataset([Meta('evaluation', 'CUB_Bird'),", "item): dataset_id, index = self.index_mapping(item) instance, true_label = self.dataset_list[dataset_id][index] # true_label is the", "12: 0, ...} # Setup transforms enable evaluation as OOD dataset self.transform =", "... 
4 0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 960} #", "for i in range(self.num_classes())} # {dict: 16} # {'014.Indigo_Bunting': 0, '042.Vermilion_Flycatcher': 1, '051.Horned_Grebe':", "2, 'n02174001': 3, 'n02219486': 4, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # class_name", "0 class_id_offset = 0 for dataset in self.dataset_list: datasetid_to_class_id.update( dict(zip(map(lambda id: id +", "'evaluation'): raise(ValueError, 'subset must be one of (background, evaluation)') self.subset = subset self.OOD_test", "float(class_id) class MultiDataset(Dataset): def __init__(self, dataset_list: List[Dataset]): \"\"\"Dataset class representing a list of", "item % self.n_classes return np.array([item] + [class_id]*self.n_features, dtype=np.float), float(class_id) class MultiDataset(Dataset): def __init__(self,", "raise(ValueError, 'subset must be one of (background, evaluation)') self.subset = subset self.OOD_test =", "images class Meta(Dataset): def __init__(self, subset, target): \"\"\"Dataset class representing CUB_Bird/DTD_Texture/FGVC_Aircraft/FGVCx_Fungi dataset #", "of dataset to ordered 0-(num_speakers - 1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [16]", "- 1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [2636] # ['Angelic.0.character01', 'Angelic.0.character02', 'Angelic.0.character03', ...]", "for debugging/testing purposes A sample from the DummyDataset has (n_features + 1) features.", "class index. # Arguments samples_per_class: Number of samples per class in the dataset", "the dataset represents the 'background' or 'evaluation' set \"\"\" if subset not in", "# [2636] # ['Angelic.0.character01', 'Angelic.0.character02', 'Angelic.0.character03', ...] 
self.class_name_to_id = {self.unique_characters[i]: i for i", "subset: Name of the subset # Returns A list of dicts containing information", "= 0 for root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)): subset_len +=", "n01770081 ... 3 0 # 4 n01770081 ... 4 0 # Create dicts", "# 0 n01770081 ... 0 0 # 1 n01770081 ... 1 0 #", "files in a particular subset of the miniImageNet dataset \"\"\" images = []", "method) to the index in the corresponding dataset. :param index: :return: dataset_id, item", ":param dataset_list: need to first prepare each sub-dataset \"\"\" self.dataset_list = dataset_list self.datasetid_to_class_id", "len([f for f in files if f.endswith('.png')]) progress_bar = tqdm(total=subset_len) for root, folders,", "self.OOD_test = OOD_test self.df = pd.DataFrame(self.index_subset(self.subset)) # Index of dataframe has direct correspondence", "3 014.Indigo_Bunting ... 3 0 # 4 014.Indigo_Bunting ... 4 0 # Create", "sorted(self.df['class_name'].unique()) # [2636] # ['Angelic.0.character01', 'Angelic.0.character02', 'Angelic.0.character03', ...] 
self.class_name_to_id = {self.unique_characters[i]: i for", "0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0,", "{0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6:", "return self.samples_per_class * self.n_classes def __getitem__(self, item): class_id = item % self.n_classes return", "self.dataset_list[dataset_id][index] # true_label is the label in sub-dataset label = self.datasetid_to_class_id[item] # int", "= root.split(os.sep)[-2] # linux / ; windows \\\\ # Angelic.0 class_name = '{}.{}'.format(alphabet,", "__len__(self): return sum([len(dataset) for dataset in self.dataset_list]) def num_classes(self): sum([dataset.num_classes() for dataset in", "= self.datasetid_to_class_id[item] # from 0 -> 20 return instance, label def __len__(self): return", "//10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg progress_bar.close() return images class Meta(Dataset): def __init__(self, subset, target): \"\"\"Dataset class representing", "84, 84] label = self.datasetid_to_class_id[item] # from 0 -> 16 return instance, label", "class MultiDataset(Dataset): def __init__(self, dataset_list: List[Dataset]): \"\"\"Dataset class representing a list of datasets", "datasetid_to_class_id \"\"\" datasetid_to_class_id = dict() index_offset = 0 class_id_offset = 0 for dataset", "20} # {'n01770081': 0, 'n02101006': 1, 'n02108551': 2, 'n02174001': 3, 'n02219486': 4, ...}", "dataset_id, dataset in enumerate(self.dataset_list): if index < len(dataset): return dataset_id, index else: index", "__name__ == \"__main__\": # debug on MultiDataset evaluation = MultiDataset([Meta('evaluation', 'CUB_Bird'), Meta('evaluation', 'DTD_Texture'),", "# transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ]) def __getitem__(self, item): if self.OOD_test:", "0, 7: 0, 8: 0, 9: 0, 10: 0, ...} # Setup transforms", "images.append({ 'subset': subset, 'alphabet': alphabet, 'class_name': class_name, 'filepath': os.path.join(root, f) }) # filepath:", "0 -> 20 return instance, label def 
__len__(self): return len(self.df) def num_classes(self): return", "debug on MultiDataset evaluation = MultiDataset([Meta('evaluation', 'CUB_Bird'), Meta('evaluation', 'DTD_Texture'), Meta('evaluation', 'FGVC_Aircraft')]) # print(evaluation[1000][0].shape,", "self.df = pd.DataFrame(self.index_subset(self.subset)) # Index of dataframe has direct correspondence to item in", "return images class MiniImageNet(Dataset): def __init__(self, subset): \"\"\"Dataset class representing miniImageNet dataset #", "list of dicts containing information about all the image files in a particular", "'background' or 'evaluation' set \"\"\" if subset not in ('background', 'evaluation'): raise(ValueError, 'subset", "+ '/miniImageNet/images_{}'.format(subset)): subset_len += len([f for f in files if f.endswith('.jpg')]) progress_bar =", "= self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # class_name filepath subset id class_id {Bird: 960} #", "# Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 960} # {0: '//10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg', ...}", "Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 84x84 instance = self.transform(instance) # [3, 84, 84] label =", "the index of the sample in the data and the remaining features are", "class_id_offset, dataset.datasetid_to_class_id.values()))) ) index_offset = index_offset + len(dataset) class_id_offset = class_id_offset + dataset.num_classes()", "bar subset_len = 0 for root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)):", "from the DummyDataset has (n_features + 1) features. 
The first feature is the", "# Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 12000} # {0: '//10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg', ...}", "len(self.df['class_name'].unique()) @staticmethod def index_subset(subset, target): \"\"\"Index a subset by looping through all of", "{Bird: 960} # 0 014.Indigo_Bunting ... 0 0 # 1 014.Indigo_Bunting ... 1", "class_id + class_id_offset, dataset.datasetid_to_class_id.values()))) ) index_offset = index_offset + len(dataset) class_id_offset = class_id_offset", "instance, label def __len__(self): return sum([len(dataset) for dataset in self.dataset_list]) def num_classes(self): sum([dataset.num_classes()", "# {dict: 12000} # {0: '//10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 12000}", "in a particular subset of the miniImageNet dataset \"\"\" images = [] print('Indexing", "84] label = self.datasetid_to_class_id[item] # from 0 -> 16 return instance, label def", "samples_per_class=10, n_classes=10, n_features=1): \"\"\"Dummy dataset for debugging/testing purposes A sample from the DummyDataset", "{dict: 52720} # {0: '//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 52720} #", "# [3, 84, 84] label = self.datasetid_to_class_id[item] # from 0 -> 16 return", "files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): subset_len += len([f for f in files if", "0 # 3 n01770081 ... 3 0 # 4 n01770081 ... 4 0", "from 0 -> 20 return instance, label def __len__(self): return len(self.df) def num_classes(self):", "pytorch instance = instance[np.newaxis, :, :] # [1, 28, 28] # Normalise to", "{DataFrame: (52720, 6)} # 0 Angelic.0 Angelic.0.character01 ... 
0 0 # 1 Angelic.0", "id + index_offset, dataset.datasetid_to_class_id.keys()), map(lambda class_id: class_id + class_id_offset, dataset.datasetid_to_class_id.values()))) ) index_offset =", "len(dataset) class_id_offset = class_id_offset + dataset.num_classes() return datasetid_to_class_id def __getitem__(self, item): dataset_id, index", "dataset self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), # ToTensor() will normalize to [0, 1]", "a subset by looping through all of its files and recording relevant information.", "transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), # ToTensor() will normalize to [0, 1] ]) def __getitem__(self,", "[1, 28, 28] # Normalise to 0-1 instance = (instance - instance.min()) /", "4 0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 960} # {0:", "n01770081 for f in files: progress_bar.update(1) images.append({ 'subset': subset, 'class_name': class_name, 'filepath': os.path.join(root,", "0, 8: 0, 9: 0, 10: 0, ...} # Setup transforms self.transform =", "target self.df = pd.DataFrame(self.index_subset(self.subset, self.target)) # Index of dataframe has direct correspondence to", "enable evaluation as OOD dataset self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), # ToTensor() will", "bar subset_len = 0 for root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): subset_len", "the label in sub-dataset label = self.datasetid_to_class_id[item] # int return instance, label def", "0 # 1 n01770081 ... 1 0 # 2 n01770081 ... 
2 0", "+ index_offset, dataset.datasetid_to_class_id.keys()), map(lambda class_id: class_id + class_id_offset, dataset.datasetid_to_class_id.values()))) ) index_offset = index_offset", "evaluation set target: which dataset to represent \"\"\" if subset not in ('background',", "'evaluation'): raise(ValueError, 'subset must be one of (background, evaluation)') if target not in", "num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset, target): \"\"\"Index a subset by looping through", "JpegImageFile, 84x84 instance = self.transform(instance) # [3, 84, 84] label = self.datasetid_to_class_id[item] #", "a list of datasets # Arguments: :param dataset_list: need to first prepare each", "return datasetid_to_class_id def __getitem__(self, item): dataset_id, index = self.index_mapping(item) instance, true_label = self.dataset_list[dataset_id][index]", "... 3 0 # 4 Angelic.0 Angelic.0.character01 ... 4 0 # Create dicts", "method to map index (in __getitem__ method) to the index in the corresponding", "files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): if len(files) == 0: continue class_name = root.split(os.sep)[-1]", "len([f for f in files if f.endswith('.jpg')]) progress_bar = tqdm(total=subset_len) for root, folders,", "# {dict: 960} # {0: 0, 1: 0, 2: 0, 3: 0, 4:", "self.datasetid_to_class_id = self.label_mapping() def index_mapping(self, index) -> (int, int): \"\"\" A mapping method", "self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 960} # {0: '//10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id']", "return np.array([item] + [class_id]*self.n_features, dtype=np.float), float(class_id) class MultiDataset(Dataset): def __init__(self, dataset_list: List[Dataset]): \"\"\"Dataset", "through all of its files and recording relevant information. 
# Arguments subset: Name", "of distinct classes in the dataset n_features: Number of extra features each sample", "i in range(len(self))] }) self.df = self.df.assign(id=self.df.index.values) def __len__(self): return self.samples_per_class * self.n_classes", "List, Dict from config import DATA_PATH class OmniglotDataset(Dataset): def __init__(self, subset, OOD_test=False): \"\"\"Dataset", "'/meta-dataset/{}/{}'.format(target, folder_name)): if len(files) == 0: continue class_name = root.split(os.sep)[-1] # linux /", "('background', 'evaluation'): raise(ValueError, 'subset must be one of (background, evaluation)') if target not", "CUB_Bird/DTD_Texture/FGVC_Aircraft/FGVCx_Fungi dataset # Arguments: subset: Whether the dataset represents the background or evaluation", "\"\"\" datasetid_to_class_id = dict() index_offset = 0 class_id_offset = 0 for dataset in", "self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), # ToTensor() will normalize to [0, 1] #", "# filepath: //10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png progress_bar.close() return images class MiniImageNet(Dataset): def __init__(self, subset): \"\"\"Dataset class", "/ (instance.max() - instance.min()) label = self.datasetid_to_class_id[item] # from 0 -> 2636 return", "windows \\\\ # 014.Indigo_Bunting for f in files: progress_bar.update(1) images.append({ 'subset': subset, 'class_name':", "transforms.ToTensor(), # ToTensor() will normalize to [0, 1] # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5,", "transforms.Resize(84), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def __getitem__(self, item): instance", "+ '/miniImageNet/images_{}'.format(subset)): if len(files) == 0: continue class_name = root.split(os.sep)[-1] # linux /", "the dataset represents the background or evaluation set \"\"\" if subset not in", "self.class_name_to_id[c])) # alphabet 
class_name filepath subset id class_id {DataFrame: (52720, 6)} # 0", "0, 10: 0, 11: 0, 12: 0, ...} # Setup transforms enable evaluation", "dataset in self.dataset_list: datasetid_to_class_id.update( dict(zip(map(lambda id: id + index_offset, dataset.datasetid_to_class_id.keys()), map(lambda class_id: class_id", "\"\"\" images = [] print('Indexing {}...'.format(subset)) # Quick first pass to find total", "should have. \"\"\" self.samples_per_class = samples_per_class self.n_classes = n_classes self.n_features = n_features #", "subset): \"\"\"Dataset class representing miniImageNet dataset # Arguments: subset: Whether the dataset represents", "looping through all of its files and recording relevant information. # Arguments subset:", "0 0 # 1 n01770081 ... 1 0 # 2 n01770081 ... 2", "of instances, index {index}') def label_mapping(self) -> Dict: \"\"\" generate mapping dict from", "samples per class in the dataset n_classes: Number of distinct classes in the", "containing information about all the image files in a particular subset of the", "# 2 Angelic.0 Angelic.0.character01 ... 2 0 # 3 Angelic.0 Angelic.0.character01 ... 3", "in the data and the remaining features are the class index. # Arguments", "__init__(self, subset, target): \"\"\"Dataset class representing CUB_Bird/DTD_Texture/FGVC_Aircraft/FGVCx_Fungi dataset # Arguments: subset: Whether the", "supported by pytorch instance = instance[np.newaxis, :, :] # [1, 28, 28] #", "014.Indigo_Bunting ... 2 0 # 3 014.Indigo_Bunting ... 3 0 # 4 014.Indigo_Bunting", "'/Omniglot_enriched/images_{}'.format(subset)): subset_len += len([f for f in files if f.endswith('.png')]) progress_bar = tqdm(total=subset_len)", "other Datasets self.df = pd.DataFrame({ 'class_id': [i % self.n_classes for i in range(len(self))]", "(52720, 6)} # 0 Angelic.0 Angelic.0.character01 ... 
0 0 # 1 Angelic.0 Angelic.0.character01", "...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 52720} # {0: 0, 1: 0, 2:", "range(len(self))] }) self.df = self.df.assign(id=self.df.index.values) def __len__(self): return self.samples_per_class * self.n_classes def __getitem__(self,", "evaluation)') self.subset = subset self.df = pd.DataFrame(self.index_subset(self.subset)) # Index of dataframe has direct", "'Angelic.0.character03': 2, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # alphabet class_name filepath subset", "to ordered 0-(num_speakers - 1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [2636] # ['Angelic.0.character01',", "= Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 84x84 instance = self.transform(instance) # [3, 84, 84] label", "with other Datasets self.df = pd.DataFrame({ 'class_id': [i % self.n_classes for i in", "7: 0, 8: 0, 9: 0, 10: 0, ...} # Setup transforms self.transform", "linux / ; windows \\\\ # n01770081 for f in files: progress_bar.update(1) images.append({", "{0: '//10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 12000} # {0: 0, 1:", "0.406], std=[0.229, 0.224, 0.225]) ]) def __getitem__(self, item): instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile,", "12000} # 0 n01770081 ... 0 0 # 1 n01770081 ... 
1 0", "# {'n01770081': 0, 'n02101006': 1, 'n02108551': 2, 'n02174001': 3, 'n02219486': 4, ...} self.df", "the sample in the data and the remaining features are the class index.", "# Arguments: subset: Whether the dataset represents the 'background' or 'evaluation' set \"\"\"", "/ ; windows \\\\ # Angelic.0 class_name = '{}.{}'.format(alphabet, root.split(os.sep)[-1]) # Angelic.0.character01 for", "linux / ; windows \\\\ # Angelic.0 class_name = '{}.{}'.format(alphabet, root.split(os.sep)[-1]) # Angelic.0.character01", "class_id: class_id + class_id_offset, dataset.datasetid_to_class_id.values()))) ) index_offset = index_offset + len(dataset) class_id_offset =", "i for i in range(self.num_classes())} # {dict: 20} # {'n01770081': 0, 'n02101006': 1,", "'DTD_Texture', 'FGVC_Aircraft', 'FGVCx_Fungi'): raise(ValueError, 'target must be one of (CUB_Bird, DTD_Texture, FGVC_Aircraft, FGVCx_Fungi)')", "transforms enable evaluation as OOD dataset self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), # ToTensor()", "subset == 'background' else 'val' # Quick first pass to find total for", "subset id class_id {MiniImageNet: 12000} # 0 n01770081 ... 0 0 # 1", "0, ...} # Setup transforms enable evaluation as OOD dataset self.transform = transforms.Compose([", "if self.OOD_test: instance = Image.open(self.datasetid_to_filepath[item]) # PNG, 28X28 instance = instance.convert('RGB') instance =", "# 4 014.Indigo_Bunting ... 4 0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] #", "for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): subset_len += len([f for f", "if f.endswith('.jpg')]) progress_bar = tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)):", "3 Angelic.0 Angelic.0.character01 ... 3 0 # 4 Angelic.0 Angelic.0.character01 ... 
4 0", "'subset': subset, 'class_name': class_name, 'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg progress_bar.close() return", "# true_label is the label in sub-dataset label = self.datasetid_to_class_id[item] # int return", "of dataset to ordered 0-(num_speakers - 1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [20]", "'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg progress_bar.close() return images class DummyDataset(Dataset): def", "self.label_mapping() def index_mapping(self, index) -> (int, int): \"\"\" A mapping method to map", "\"\"\" for dataset_id, dataset in enumerate(self.dataset_list): if index < len(dataset): return dataset_id, index", "def __init__(self, subset): \"\"\"Dataset class representing miniImageNet dataset # Arguments: subset: Whether the", "DummyDataset has (n_features + 1) features. The first feature is the index of", "'subset': subset, 'alphabet': alphabet, 'class_name': class_name, 'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png", "dataset self.df = self.df.assign(id=self.df.index.values) # Convert arbitrary class names of dataset to ordered", "# PNG, 28X28 instance = instance.convert('RGB') instance = self.transform(instance) # [3, 84, 84]", "self.df = self.df.assign(id=self.df.index.values) def __len__(self): return self.samples_per_class * self.n_classes def __getitem__(self, item): class_id", "index in the corresponding dataset. 
:param index: :return: dataset_id, item \"\"\" for dataset_id,", "consistent with other Datasets self.df = pd.DataFrame({ 'class_id': [i % self.n_classes for i", "500x384 instance = self.transform(instance) # [3, 84, 84] label = self.datasetid_to_class_id[item] # from", "0, ...} # Setup transforms self.transform = transforms.Compose([ transforms.CenterCrop(224), transforms.Resize(84), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456,", "transforms self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), # ToTensor() will normalize to [0, 1]", "debugging/testing purposes A sample from the DummyDataset has (n_features + 1) features. The", "in range(len(self))] }) self.df = self.df.assign(id=self.df.index.values) def __len__(self): return self.samples_per_class * self.n_classes def", "0 # 4 014.Indigo_Bunting ... 4 0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath']", "Setup transforms self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), # ToTensor() will normalize to [0,", "f.endswith('.jpg')]) progress_bar = tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): if", "as np import os from typing import List, Dict from config import DATA_PATH", "def num_classes(self): sum([dataset.num_classes() for dataset in self.dataset_list]) if __name__ == \"__main__\": # debug", "import torch from PIL import Image from torchvision import transforms from skimage import", "alphabet, 'class_name': class_name, 'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png progress_bar.close() return images", "to item in dataset self.df = self.df.assign(id=self.df.index.values) # Convert arbitrary class names of", "__len__(self): return self.samples_per_class * self.n_classes def __getitem__(self, item): class_id = item % 
self.n_classes", "... 3 0 # 4 014.Indigo_Bunting ... 4 0 # Create dicts self.datasetid_to_filepath", "1 0 # 2 n01770081 ... 2 0 # 3 n01770081 ... 3", "self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # class_name filepath subset id class_id {MiniImageNet: 12000}", "item): class_id = item % self.n_classes return np.array([item] + [class_id]*self.n_features, dtype=np.float), float(class_id) class", "n01770081 ... 4 0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 12000}", "dataset represents the 'background' or 'evaluation' set \"\"\" if subset not in ('background',", "evaluation as OOD dataset self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), # ToTensor() will normalize", "dataset in enumerate(self.dataset_list): if index < len(dataset): return dataset_id, index else: index =", "index_offset = 0 class_id_offset = 0 for dataset in self.dataset_list: datasetid_to_class_id.update( dict(zip(map(lambda id:", "0 n01770081 ... 0 0 # 1 n01770081 ... 
1 0 # 2", "will normalize to [0, 1] ]) def __getitem__(self, item): instance = Image.open(self.datasetid_to_filepath[item]) #", "\"\"\" self.samples_per_class = samples_per_class self.n_classes = n_classes self.n_features = n_features # Create a", "folder_name = 'train' if subset == 'background' else 'val' # Quick first pass", "# Index of dataframe has direct correspondence to item in dataset self.df =", "Normalise to 0-1 instance = (instance - instance.min()) / (instance.max() - instance.min()) label", "represents the background or evaluation set target: which dataset to represent \"\"\" if", "28] # Normalise to 0-1 instance = (instance - instance.min()) / (instance.max() -", "for f in files: progress_bar.update(1) images.append({ 'subset': subset, 'alphabet': alphabet, 'class_name': class_name, 'filepath':", "the subset # Returns A list of dicts containing information about all the", "# 1 Angelic.0 Angelic.0.character01 ... 1 0 # 2 Angelic.0 Angelic.0.character01 ... 2", "# Setup transforms self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), # ToTensor() will normalize to", "+ [class_id]*self.n_features, dtype=np.float), float(class_id) class MultiDataset(Dataset): def __init__(self, dataset_list: List[Dataset]): \"\"\"Dataset class representing", "for i in range(len(self))] }) self.df = self.df.assign(id=self.df.index.values) def __len__(self): return self.samples_per_class *", "per class in the dataset n_classes: Number of distinct classes in the dataset", "4, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # class_name filepath subset id class_id", "-> 20 return instance, label def __len__(self): return len(self.df) def num_classes(self): return len(self.df['class_name'].unique())", "features are the class index. 
# Arguments samples_per_class: Number of samples per class", "from skimage import io from tqdm import tqdm import pandas as pd import", "about all the image files in a particular subset of the miniImageNet dataset", "import os from typing import List, Dict from config import DATA_PATH class OmniglotDataset(Dataset):", "\"\"\" images = [] print('Indexing {}...{}...'.format(target, subset)) folder_name = 'train' if subset ==", "import Image from torchvision import transforms from skimage import io from tqdm import", "has direct correspondence to item in dataset self.df = self.df.assign(id=self.df.index.values) # Convert arbitrary", "numpy as np import os from typing import List, Dict from config import", "0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0,", "...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 960} # {0: 0, 1: 0, 2:", "960} # 0 014.Indigo_Bunting ... 0 0 # 1 014.Indigo_Bunting ... 1 0", "Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 12000} # {0: '//10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg', ...} self.datasetid_to_class_id", "for dataset in self.dataset_list: datasetid_to_class_id.update( dict(zip(map(lambda id: id + index_offset, dataset.datasetid_to_class_id.keys()), map(lambda class_id:", "dataset n_classes: Number of distinct classes in the dataset n_features: Number of extra", "Arguments: subset: Whether the dataset represents the background or evaluation set target: which", "- instance.min()) label = self.datasetid_to_class_id[item] # from 0 -> 2636 return torch.from_numpy(instance), label", "'n02174001', 'n02219486', 'n02606052', 'n02747177', ...] 
self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())}", "index_offset + len(dataset) class_id_offset = class_id_offset + dataset.num_classes() return datasetid_to_class_id def __getitem__(self, item):", "Meta(Dataset): def __init__(self, subset, target): \"\"\"Dataset class representing CUB_Bird/DTD_Texture/FGVC_Aircraft/FGVCx_Fungi dataset # Arguments: subset:", "instance = instance[np.newaxis, :, :] # [1, 28, 28] # Normalise to 0-1", "class representing CUB_Bird/DTD_Texture/FGVC_Aircraft/FGVCx_Fungi dataset # Arguments: subset: Whether the dataset represents the background", "sum([len(dataset) for dataset in self.dataset_list]) def num_classes(self): sum([dataset.num_classes() for dataset in self.dataset_list]) if", "'Angelic.0.character02': 1, 'Angelic.0.character03': 2, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # alphabet class_name", "return len(self.df) def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset): \"\"\"Index a subset by", "instance = self.transform(instance) # [3, 84, 84] label = self.datasetid_to_class_id[item] # from 0", "def __len__(self): return self.samples_per_class * self.n_classes def __getitem__(self, item): class_id = item %", "0: continue class_name = root.split(os.sep)[-1] # linux / ; windows \\\\ # 014.Indigo_Bunting", "//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png progress_bar.close() return images class MiniImageNet(Dataset): def __init__(self, subset): \"\"\"Dataset class representing miniImageNet", "root.split(os.sep)[-1]) # Angelic.0.character01 for f in files: progress_bar.update(1) images.append({ 'subset': subset, 'alphabet': alphabet,", "DTD_Texture, FGVC_Aircraft, FGVCx_Fungi)') self.subset = subset self.target = target self.df = pd.DataFrame(self.index_subset(self.subset, self.target))", "extra features each sample 
should have. \"\"\" self.samples_per_class = samples_per_class self.n_classes = n_classes", "self.df = pd.DataFrame(self.index_subset(self.subset, self.target)) # Index of dataframe has direct correspondence to item", "self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), # ToTensor() will normalize to [0, 1] ])", "# 3 n01770081 ... 3 0 # 4 n01770081 ... 4 0 #", "n_features: Number of extra features each sample should have. \"\"\" self.samples_per_class = samples_per_class", "if subset == 'background' else 'val' # Quick first pass to find total", "0, 10: 0, ...} # Setup transforms self.transform = transforms.Compose([ transforms.CenterCrop(224), transforms.Resize(84), transforms.ToTensor(),", "return images class Meta(Dataset): def __init__(self, subset, target): \"\"\"Dataset class representing CUB_Bird/DTD_Texture/FGVC_Aircraft/FGVCx_Fungi dataset", "20 return instance, label def __len__(self): return len(self.df) def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod", "subset self.target = target self.df = pd.DataFrame(self.index_subset(self.subset, self.target)) # Index of dataframe has", "the data and the remaining features are the class index. 
# Arguments samples_per_class:", "Reindex to channels first format as supported by pytorch instance = instance[np.newaxis, :,", "84, 84] label = self.datasetid_to_class_id[item] # from 0 -> 20 return instance, label", "self.subset = subset self.OOD_test = OOD_test self.df = pd.DataFrame(self.index_subset(self.subset)) # Index of dataframe", "return sum([len(dataset) for dataset in self.dataset_list]) def num_classes(self): sum([dataset.num_classes() for dataset in self.dataset_list])", "one of (background, evaluation)') self.subset = subset self.OOD_test = OOD_test self.df = pd.DataFrame(self.index_subset(self.subset))", "files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): if len(files) == 0: continue alphabet = root.split(os.sep)[-2]", "= {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 16} # {'014.Indigo_Bunting': 0,", "os from typing import List, Dict from config import DATA_PATH class OmniglotDataset(Dataset): def", "== 0: continue alphabet = root.split(os.sep)[-2] # linux / ; windows \\\\ #", "+ 1) features. 
The first feature is the index of the sample in", "dataset.datasetid_to_class_id.values()))) ) index_offset = index_offset + len(dataset) class_id_offset = class_id_offset + dataset.num_classes() return", "if f.endswith('.jpg')]) progress_bar = tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target,", "3, 'n02219486': 4, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # class_name filepath subset", "n_classes self.n_features = n_features # Create a dataframe to be consistent with other", "subset self.df = pd.DataFrame(self.index_subset(self.subset)) # Index of dataframe has direct correspondence to item", "from tqdm import tqdm import pandas as pd import numpy as np import", "{dict: 960} # {0: 0, 1: 0, 2: 0, 3: 0, 4: 0,", "{dict: 20} # {'n01770081': 0, 'n02101006': 1, 'n02108551': 2, 'n02174001': 3, 'n02219486': 4,", "0.225]) ]) def __getitem__(self, item): instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 500x384 instance =", "torch.utils.data import Dataset import torch from PIL import Image from torchvision import transforms", "remaining features are the class index. # Arguments samples_per_class: Number of samples per", "'n02101006', 'n02108551', 'n02174001', 'n02219486', 'n02606052', 'n02747177', ...] self.class_name_to_id = {self.unique_characters[i]: i for i", "self.datasetid_to_class_id[item] # from 0 -> 20 return instance, label def __len__(self): return len(self.df)", "... 0 0 # 1 014.Indigo_Bunting ... 
1 0 # 2 014.Indigo_Bunting ...", "0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 960} # {0: '//10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg',", "folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): subset_len += len([f for f in files", "folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): subset_len += len([f for f in files", "= Image.open(self.datasetid_to_filepath[item]) # PNG, 28X28 instance = instance.convert('RGB') instance = self.transform(instance) # [3,", "label = self.datasetid_to_class_id[item] # from 0 -> 20 return instance, label def __len__(self):", "0: continue alphabet = root.split(os.sep)[-2] # linux / ; windows \\\\ # Angelic.0", "if f.endswith('.png')]) progress_bar = tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)):", "1 0 # 2 014.Indigo_Bunting ... 2 0 # 3 014.Indigo_Bunting ... 3", "files if f.endswith('.jpg')]) progress_bar = tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH +", "map index (in __getitem__ method) to the index in the corresponding dataset. :param", "transforms.ToTensor(), # ToTensor() will normalize to [0, 1] ]) def __getitem__(self, item): instance", "def label_mapping(self) -> Dict: \"\"\" generate mapping dict from datasetid to global class", "in enumerate(self.dataset_list): if index < len(dataset): return dataset_id, index else: index = index", "n01770081 ... 1 0 # 2 n01770081 ... 
2 0 # 3 n01770081", "/ ; windows \\\\ # n01770081 for f in files: progress_bar.update(1) images.append({ 'subset':", "}) # filepath: //10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg progress_bar.close() return images class Meta(Dataset): def __init__(self, subset, target):", "image files in a particular subset of the miniImageNet dataset \"\"\" images =", "...] self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 20} #", "__init__(self, subset, OOD_test=False): \"\"\"Dataset class representing Omniglot dataset # Arguments: subset: Whether the", "self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # class_name filepath subset id class_id {MiniImageNet: 12000} # 0", "014.Indigo_Bunting ... 1 0 # 2 014.Indigo_Bunting ... 2 0 # 3 014.Indigo_Bunting", "'/Omniglot_enriched/images_{}'.format(subset)): if len(files) == 0: continue alphabet = root.split(os.sep)[-2] # linux / ;", "io from tqdm import tqdm import pandas as pd import numpy as np", "miniImageNet dataset \"\"\" images = [] print('Indexing {}...{}...'.format(target, subset)) folder_name = 'train' if", "index_subset(subset, target): \"\"\"Index a subset by looping through all of its files and", "... 3 0 # 4 n01770081 ... 4 0 # Create dicts self.datasetid_to_filepath", "List[Dataset]): \"\"\"Dataset class representing a list of datasets # Arguments: :param dataset_list: need", "len(files) == 0: continue alphabet = root.split(os.sep)[-2] # linux / ; windows \\\\", "-> 20 return instance, label else: instance = io.imread(self.datasetid_to_filepath[item]) # [28, 28] #", "features. 
The first feature is the index of the sample in the data", "Dataset import torch from PIL import Image from torchvision import transforms from skimage", "0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0, ...} #", "# Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 52720} # {0: '//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png', ...}", "the background or evaluation set \"\"\" if subset not in ('background', 'evaluation'): raise(ValueError,", "9: 0, 10: 0, 11: 0, 12: 0, ...} # Setup transforms enable", "def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset): \"\"\"Index a subset by looping through", "a particular subset of the miniImageNet dataset \"\"\" images = [] print('Indexing {}...'.format(subset))", "self.n_classes = n_classes self.n_features = n_features # Create a dataframe to be consistent", "2 0 # 3 n01770081 ... 3 0 # 4 n01770081 ... 4", "Datasets self.df = pd.DataFrame({ 'class_id': [i % self.n_classes for i in range(len(self))] })", "label = self.datasetid_to_class_id[item] # from 0 -> 20 return instance, label else: instance", "# class_name filepath subset id class_id {Bird: 960} # 0 014.Indigo_Bunting ... 0", "sample from the DummyDataset has (n_features + 1) features. The first feature is", "[20] # ['n01770081', 'n02101006', 'n02108551', 'n02174001', 'n02219486', 'n02606052', 'n02747177', ...] self.class_name_to_id = {self.unique_characters[i]:", "A list of dicts containing information about all the image files in a", "0 014.Indigo_Bunting ... 0 0 # 1 014.Indigo_Bunting ... 
1 0 # 2", "folder_name)): if len(files) == 0: continue class_name = root.split(os.sep)[-1] # linux / ;", "# Create a dataframe to be consistent with other Datasets self.df = pd.DataFrame({", "self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 960} # {0: 0, 1: 0, 2: 0,", "0.224, 0.225]) ]) def __getitem__(self, item): instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 500x384 instance", "the miniImageNet dataset \"\"\" images = [] print('Indexing {}...'.format(subset)) # Quick first pass", "= tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): if len(files) ==", "of samples per class in the dataset n_classes: Number of distinct classes in", "'val' # Quick first pass to find total for tqdm bar subset_len =", "the corresponding dataset. :param index: :return: dataset_id, item \"\"\" for dataset_id, dataset in", "= self.df.to_dict()['filepath'] # {dict: 52720} # {0: '//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] #", "def __init__(self, samples_per_class=10, n_classes=10, n_features=1): \"\"\"Dummy dataset for debugging/testing purposes A sample from", "[3, 84, 84] label = self.datasetid_to_class_id[item] # from 0 -> 16 return instance,", "os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg progress_bar.close() return images class Meta(Dataset): def __init__(self,", "n_classes=10, n_features=1): \"\"\"Dummy dataset for debugging/testing purposes A sample from the DummyDataset has", "tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): if len(files) == 0:", "number of instances, index {index}') def label_mapping(self) -> Dict: \"\"\" generate mapping dict", "('CUB_Bird', 'DTD_Texture', 'FGVC_Aircraft', 
'FGVCx_Fungi'): raise(ValueError, 'target must be one of (CUB_Bird, DTD_Texture, FGVC_Aircraft,", "each sample should have. \"\"\" self.samples_per_class = samples_per_class self.n_classes = n_classes self.n_features =", "self.subset = subset self.target = target self.df = pd.DataFrame(self.index_subset(self.subset, self.target)) # Index of", "self.samples_per_class * self.n_classes def __getitem__(self, item): class_id = item % self.n_classes return np.array([item]", "# JpegImageFile, 500x384 instance = self.transform(instance) # [3, 84, 84] label = self.datasetid_to_class_id[item]", "= instance[np.newaxis, :, :] # [1, 28, 28] # Normalise to 0-1 instance", "in the dataset n_features: Number of extra features each sample should have. \"\"\"", ":] # [1, 28, 28] # Normalise to 0-1 instance = (instance -", "for i in range(self.num_classes())} # {dict: 2636} # {'Angelic.0.character01': 0, 'Angelic.0.character02': 1, 'Angelic.0.character03':", "ToTensor() will normalize to [0, 1] # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])", "0 for root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)): subset_len += len([f", "instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 84x84 instance = self.transform(instance) # [3, 84, 84]", "subset id class_id {Bird: 960} # 0 014.Indigo_Bunting ... 0 0 # 1", "self.unique_characters = sorted(self.df['class_name'].unique()) # [16] # ['014.Indigo_Bunting', '042.Vermilion_Flycatcher', '051.Horned_Grebe', ...] 
self.class_name_to_id = {self.unique_characters[i]:", "}) # filepath: //10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png progress_bar.close() return images class MiniImageNet(Dataset): def __init__(self, subset): \"\"\"Dataset", "num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset): \"\"\"Index a subset by looping through all", "enumerate(self.dataset_list): if index < len(dataset): return dataset_id, index else: index = index -", "tqdm bar subset_len = 0 for root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target,", "all the image files in a particular subset of the miniImageNet dataset \"\"\"", "(background, evaluation)') self.subset = subset self.df = pd.DataFrame(self.index_subset(self.subset)) # Index of dataframe has", "0 # 2 Angelic.0 Angelic.0.character01 ... 2 0 # 3 Angelic.0 Angelic.0.character01 ...", "in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): subset_len += len([f for f in files if f.endswith('.jpg')])", "4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10:", "the Omniglot dataset dataset \"\"\" images = [] print('Indexing {}...'.format(subset)) # Quick first", "in ('background', 'evaluation'): raise(ValueError, 'subset must be one of (background, evaluation)') if target", "files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)): if len(files) == 0: continue class_name =", "subset of the miniImageNet dataset \"\"\" images = [] print('Indexing {}...{}...'.format(target, subset)) folder_name", "std=[0.229, 0.224, 0.225]) ]) def __getitem__(self, item): instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 500x384", "# {dict: 20} # {'n01770081': 0, 'n02101006': 1, 'n02108551': 2, 'n02174001': 3, 'n02219486':", "# Angelic.0 class_name = '{}.{}'.format(alphabet, root.split(os.sep)[-1]) # Angelic.0.character01 for f in files: progress_bar.update(1)", 
"'//10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 12000} # {0: 0, 1: 0,", "be consistent with other Datasets self.df = pd.DataFrame({ 'class_id': [i % self.n_classes for", "0 # 4 n01770081 ... 4 0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath']", "['n01770081', 'n02101006', 'n02108551', 'n02174001', 'n02219486', 'n02606052', 'n02747177', ...] self.class_name_to_id = {self.unique_characters[i]: i for", "id: id + index_offset, dataset.datasetid_to_class_id.keys()), map(lambda class_id: class_id + class_id_offset, dataset.datasetid_to_class_id.values()))) ) index_offset", "corresponding dataset. :param index: :return: dataset_id, item \"\"\" for dataset_id, dataset in enumerate(self.dataset_list):", "4 0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 52720} # {0:", "A sample from the DummyDataset has (n_features + 1) features. The first feature", "dataset. 
:param index: :return: dataset_id, item \"\"\" for dataset_id, dataset in enumerate(self.dataset_list): if", "in range(self.num_classes())} # {dict: 20} # {'n01770081': 0, 'n02101006': 1, 'n02108551': 2, 'n02174001':", "files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): subset_len += len([f for f in files if", "self.df.to_dict()['filepath'] # {dict: 960} # {0: '//10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict:", "84] label = self.datasetid_to_class_id[item] # from 0 -> 20 return instance, label else:", "def __init__(self, subset, target): \"\"\"Dataset class representing CUB_Bird/DTD_Texture/FGVC_Aircraft/FGVCx_Fungi dataset # Arguments: subset: Whether", "not in ('background', 'evaluation'): raise(ValueError, 'subset must be one of (background, evaluation)') self.subset", "transforms from skimage import io from tqdm import tqdm import pandas as pd", "Whether the dataset represents the 'background' or 'evaluation' set \"\"\" if subset not", "52720} # {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5:", "normalize to [0, 1] # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ]) def", "def __getitem__(self, item): instance = Image.open(self.datasetid_to_filepath[item]) # JpegImageFile, 500x384 instance = self.transform(instance) #", "class_name filepath subset id class_id {DataFrame: (52720, 6)} # 0 Angelic.0 Angelic.0.character01 ...", "# {dict: 52720} # {0: '//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 52720}", "in os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): if len(files) == 0: continue alphabet = root.split(os.sep)[-2] #", "= self.index_mapping(item) instance, true_label = self.dataset_list[dataset_id][index] # true_label is the label in 
sub-dataset", "particular subset of the miniImageNet dataset \"\"\" images = [] print('Indexing {}...'.format(subset)) #", "to 0-1 instance = (instance - instance.min()) / (instance.max() - instance.min()) label =", "filepath subset id class_id {DataFrame: (52720, 6)} # 0 Angelic.0 Angelic.0.character01 ... 0", "must be one of (background, evaluation)') if target not in ('CUB_Bird', 'DTD_Texture', 'FGVC_Aircraft',", "os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg progress_bar.close() return images class DummyDataset(Dataset): def __init__(self,", "self.df.to_dict()['class_id'] # {dict: 52720} # {0: 0, 1: 0, 2: 0, 3: 0,", "self.df.to_dict()['class_id'] # {dict: 12000} # {0: 0, 1: 0, 2: 0, 3: 0,", "4 014.Indigo_Bunting ... 4 0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict:", "progress_bar = tqdm(total=subset_len) for root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)): if", "arbitrary class names of dataset to ordered 0-(num_speakers - 1) integers self.unique_characters =", "files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)): subset_len += len([f for f in files", "= transforms.Compose([ transforms.CenterCrop(224), transforms.Resize(84), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def", "from PIL import Image from torchvision import transforms from skimage import io from", "id class_id {Bird: 960} # 0 014.Indigo_Bunting ... 
0 0 # 1 014.Indigo_Bunting", "transforms.Compose([ transforms.CenterCrop(224), transforms.Resize(84), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def __getitem__(self,", "folder_name)): subset_len += len([f for f in files if f.endswith('.jpg')]) progress_bar = tqdm(total=subset_len)", "# filepath: //10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg progress_bar.close() return images class Meta(Dataset): def __init__(self, subset, target): \"\"\"Dataset", "'evaluation'): raise(ValueError, 'subset must be one of (background, evaluation)') self.subset = subset self.df", "# ['Angelic.0.character01', 'Angelic.0.character02', 'Angelic.0.character03', ...] self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())}", "'{}.{}'.format(alphabet, root.split(os.sep)[-1]) # Angelic.0.character01 for f in files: progress_bar.update(1) images.append({ 'subset': subset, 'alphabet':", "FGVCx_Fungi)') self.subset = subset self.target = target self.df = pd.DataFrame(self.index_subset(self.subset, self.target)) # Index", "Index of dataframe has direct correspondence to item in dataset self.df = self.df.assign(id=self.df.index.values)", "dataset_list: List[Dataset]): \"\"\"Dataset class representing a list of datasets # Arguments: :param dataset_list:", ") index_offset = index_offset + len(dataset) class_id_offset = class_id_offset + dataset.num_classes() return datasetid_to_class_id", "dtype=np.float), float(class_id) class MultiDataset(Dataset): def __init__(self, dataset_list: List[Dataset]): \"\"\"Dataset class representing a list", "len(dataset): return dataset_id, index else: index = index - len(dataset) raise(ValueError, f'index exceeds", "label = self.datasetid_to_class_id[item] # from 0 -> 2636 return torch.from_numpy(instance), label def __len__(self):", "self.class_name_to_id[c])) # class_name filepath subset id class_id {Bird: 960} # 0 
014.Indigo_Bunting ...", "014.Indigo_Bunting ... 0 0 # 1 014.Indigo_Bunting ... 1 0 # 2 014.Indigo_Bunting", "representing Omniglot dataset # Arguments: subset: Whether the dataset represents the 'background' or", "# 4 n01770081 ... 4 0 # Create dicts self.datasetid_to_filepath = self.df.to_dict()['filepath'] #", "28, 28] # Normalise to 0-1 instance = (instance - instance.min()) / (instance.max()", "\"\"\"Index a subset by looping through all of its files and recording relevant", "# 0 014.Indigo_Bunting ... 0 0 # 1 014.Indigo_Bunting ... 1 0 #", "root.split(os.sep)[-1] # linux / ; windows \\\\ # 014.Indigo_Bunting for f in files:", "\"\"\"Dataset class representing a list of datasets # Arguments: :param dataset_list: need to", "0, 9: 0, 10: 0, ...} # Setup transforms self.transform = transforms.Compose([ transforms.CenterCrop(224),", "0, ...} # Setup transforms self.transform = transforms.Compose([ transforms.Resize(84), transforms.ToTensor(), # ToTensor() will", "sub-dataset \"\"\" self.dataset_list = dataset_list self.datasetid_to_class_id = self.label_mapping() def index_mapping(self, index) -> (int,", "for dataset in self.dataset_list]) def num_classes(self): sum([dataset.num_classes() for dataset in self.dataset_list]) if __name__", "# ['n01770081', 'n02101006', 'n02108551', 'n02174001', 'n02219486', 'n02606052', 'n02747177', ...] self.class_name_to_id = {self.unique_characters[i]: i", "'/meta-dataset/{}/{}'.format(target, folder_name)): subset_len += len([f for f in files if f.endswith('.jpg')]) progress_bar =", "tqdm import tqdm import pandas as pd import numpy as np import os", "by looping through all of its files and recording relevant information. # Arguments", "1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [2636] # ['Angelic.0.character01', 'Angelic.0.character02', 'Angelic.0.character03', ...] 
self.class_name_to_id", "in ('background', 'evaluation'): raise(ValueError, 'subset must be one of (background, evaluation)') self.subset =", "self.index_mapping(item) instance, true_label = self.dataset_list[dataset_id][index] # true_label is the label in sub-dataset label", "0.5], std=[0.5, 0.5, 0.5]) ]) def __getitem__(self, item): if self.OOD_test: instance = Image.open(self.datasetid_to_filepath[item])", "self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # alphabet class_name filepath subset id class_id {DataFrame: (52720, 6)}", "on MultiDataset evaluation = MultiDataset([Meta('evaluation', 'CUB_Bird'), Meta('evaluation', 'DTD_Texture'), Meta('evaluation', 'FGVC_Aircraft')]) # print(evaluation[1000][0].shape, evaluation[1000][1])", "self.datasetid_to_filepath = self.df.to_dict()['filepath'] # {dict: 52720} # {0: '//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id']", "images.append({ 'subset': subset, 'class_name': class_name, 'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg progress_bar.close()", "class OmniglotDataset(Dataset): def __init__(self, subset, OOD_test=False): \"\"\"Dataset class representing Omniglot dataset # Arguments:", "import pandas as pd import numpy as np import os from typing import", "1, '051.Horned_Grebe': 2, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # class_name filepath subset", "1] # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ]) def __getitem__(self, item): if", "for f in files if f.endswith('.png')]) progress_bar = tqdm(total=subset_len) for root, folders, files", "os.walk(DATA_PATH + '/Omniglot_enriched/images_{}'.format(subset)): if len(files) == 0: continue alphabet = 
root.split(os.sep)[-2] # linux", "index < len(dataset): return dataset_id, index else: index = index - len(dataset) raise(ValueError,", "datasets # Arguments: :param dataset_list: need to first prepare each sub-dataset \"\"\" self.dataset_list", "self.n_classes for i in range(len(self))] }) self.df = self.df.assign(id=self.df.index.values) def __len__(self): return self.samples_per_class", "Number of samples per class in the dataset n_classes: Number of distinct classes", "or 'evaluation' set \"\"\" if subset not in ('background', 'evaluation'): raise(ValueError, 'subset must", "2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8:", "total for tqdm bar subset_len = 0 for root, folders, files in os.walk(DATA_PATH", "# from 0 -> 20 return instance, label def __len__(self): return len(self.df) def", "014.Indigo_Bunting ... 3 0 # 4 014.Indigo_Bunting ... 4 0 # Create dicts", "n01770081 ... 2 0 # 3 n01770081 ... 3 0 # 4 n01770081", "of the sample in the data and the remaining features are the class", "for tqdm bar subset_len = 0 for root, folders, files in os.walk(DATA_PATH +", "of its files and recording relevant information. # Arguments subset: Name of the", "# Convert arbitrary class names of dataset to ordered 0-(num_speakers - 1) integers", "\"\"\" self.dataset_list = dataset_list self.datasetid_to_class_id = self.label_mapping() def index_mapping(self, index) -> (int, int):", "os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): subset_len += len([f for f in files if f.endswith('.jpg')]) progress_bar", "return instance, label def __len__(self): return len(self.df) def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def", "recording relevant information. # Arguments subset: Name of the subset # Returns A", "datasetid to global class id. :return: datasetid_to_class_id \"\"\" datasetid_to_class_id = dict() index_offset =", "of the subset # Returns A list of dicts containing information about all", "2 n01770081 ... 2 0 # 3 n01770081 ... 
3 0 # 4", "instance.convert('RGB') instance = self.transform(instance) # [3, 84, 84] label = self.datasetid_to_class_id[item] # from", "- 1) integers self.unique_characters = sorted(self.df['class_name'].unique()) # [20] # ['n01770081', 'n02101006', 'n02108551', 'n02174001',", "subset of the miniImageNet dataset \"\"\" images = [] print('Indexing {}...'.format(subset)) # Quick", "def __init__(self, subset, OOD_test=False): \"\"\"Dataset class representing Omniglot dataset # Arguments: subset: Whether", "Arguments: subset: Whether the dataset represents the background or evaluation set \"\"\" if", "= self.datasetid_to_class_id[item] # from 0 -> 2636 return torch.from_numpy(instance), label def __len__(self): return", "'051.Horned_Grebe': 2, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # class_name filepath subset id", "for dataset in self.dataset_list]) if __name__ == \"__main__\": # debug on MultiDataset evaluation", "'//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict: 52720} # {0: 0, 1: 0,", "f) }) # filepath: //10.20.2.245/datasets/datasets/miniImageNet/images_evaluation\\\\n01770081\\\\00001098.jpg progress_bar.close() return images class Meta(Dataset): def __init__(self, subset,", "= self.df.assign(id=self.df.index.values) # Convert arbitrary class names of dataset to ordered 0-(num_speakers -", "self.datasetid_to_class_id[item] # from 0 -> 2636 return torch.from_numpy(instance), label def __len__(self): return len(self.df)", "+ '/Omniglot_enriched/images_{}'.format(subset)): subset_len += len([f for f in files if f.endswith('.png')]) progress_bar =", "len(self.df) def num_classes(self): return len(self.df['class_name'].unique()) @staticmethod def index_subset(subset): \"\"\"Index a subset by looping", "'filepath': os.path.join(root, f) }) # filepath: 
//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png progress_bar.close() return images class MiniImageNet(Dataset): def", "to map index (in __getitem__ method) to the index in the corresponding dataset.", "# {'Angelic.0.character01': 0, 'Angelic.0.character02': 1, 'Angelic.0.character03': 2, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c]))", "background or evaluation set target: which dataset to represent \"\"\" if subset not", "subset)) folder_name = 'train' if subset == 'background' else 'val' # Quick first", "as supported by pytorch instance = instance[np.newaxis, :, :] # [1, 28, 28]", "int): \"\"\" A mapping method to map index (in __getitem__ method) to the", "'n02219486': 4, ...} self.df = self.df.assign(class_id=self.df['class_name'].apply(lambda c: self.class_name_to_id[c])) # class_name filepath subset id", "0 for root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): subset_len += len([f for", "for root, folders, files in os.walk(DATA_PATH + '/meta-dataset/{}/{}'.format(target, folder_name)): subset_len += len([f for", "root, folders, files in os.walk(DATA_PATH + '/miniImageNet/images_{}'.format(subset)): subset_len += len([f for f in", "progress_bar.update(1) images.append({ 'subset': subset, 'class_name': class_name, 'filepath': os.path.join(root, f) }) # filepath: //10.20.2.245/datasets/datasets/meta-dataset/CUB_Bird/val\\\\014.Indigo_Bunting\\\\Indigo_Bunting_0001_12469.jpg", "for f in files if f.endswith('.jpg')]) progress_bar = tqdm(total=subset_len) for root, folders, files", "self.df.to_dict()['filepath'] # {dict: 52720} # {0: '//10.20.2.245/datasets/datasets/Omniglot_enriched/images_evaluation\\\\Angelic.0\\\\character01\\\\0965_01.png', ...} self.datasetid_to_class_id = self.df.to_dict()['class_id'] # {dict:", "dict() index_offset = 0 class_id_offset = 0 for dataset in self.dataset_list: 
datasetid_to_class_id.update( dict(zip(map(lambda", "self.class_name_to_id = {self.unique_characters[i]: i for i in range(self.num_classes())} # {dict: 2636} # {'Angelic.0.character01':", "3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9:", "0: continue class_name = root.split(os.sep)[-1] # linux / ; windows \\\\ # n01770081", "# 1 n01770081 ... 1 0 # 2 n01770081 ... 2 0 #", "from typing import List, Dict from config import DATA_PATH class OmniglotDataset(Dataset): def __init__(self,", "subset_len += len([f for f in files if f.endswith('.jpg')]) progress_bar = tqdm(total=subset_len) for", "represents the background or evaluation set \"\"\" if subset not in ('background', 'evaluation'):" ]
[ "(KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36'} styleCode='B39254'#'BB1826' r=session.get('http://www.ebay.com/sch/i.html?_nkw='+str(styleCode)+'&LH_Complete=1&LH_Sold=1',headers=headers) soup=BeautifulSoup.BeautifulSoup(r.content) try: #more than 48 listings", "max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"li\", {\"class\":\"lvprice prc\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=cardListing.findAll(\"li\", {\"class\":\"lvformat\"})[0].text print price,", "all prices in array for maths - min, max, mean, volatility print cardListing.find('h3').text", "styleCode='B39254'#'BB1826' r=session.get('http://www.ebay.com/sch/i.html?_nkw='+str(styleCode)+'&LH_Complete=1&LH_Sold=1',headers=headers) soup=BeautifulSoup.BeautifulSoup(r.content) try: #more than 48 listings - grid view for cardListing", "maths - min, max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"li\", {\"class\":\"lvprice prc\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=cardListing.findAll(\"li\",", "listings - grid view for cardListing in soup.find(\"ul\", {\"id\": \"GalleryViewInner\"}).findAll('li'): #parse element for", "price, type print except: #error with findAll - less than 48 items? for", "in array for maths - min, max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"div\", {\"class\":\"gvprices\"})[0].text.split('$')[1]", "price and when #store all prices in array for maths - min, max,", "in array for maths - min, max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"li\", {\"class\":\"lvprice", "when #store all prices in array for maths - min, max, mean, volatility", "array for maths - min, max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"div\", {\"class\":\"gvprices\"})[0].text.split('$')[1] price=st[:st.index('.')+3]", "less than 48 items? 
for cardListing in soup.find(\"ul\", {\"id\":\"ListViewInner\"}).findAll('li',recursive=False): #parse element for title", "for maths - min, max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"li\", {\"class\":\"lvprice prc\"})[0].text.split('$')[1] price=st[:st.index('.')+3]", "title and sold price and when #store all prices in array for maths", "soup.find(\"ul\", {\"id\":\"ListViewInner\"}).findAll('li',recursive=False): #parse element for title and sold price and when #store all", "Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36'} styleCode='B39254'#'BB1826' r=session.get('http://www.ebay.com/sch/i.html?_nkw='+str(styleCode)+'&LH_Complete=1&LH_Sold=1',headers=headers) soup=BeautifulSoup.BeautifulSoup(r.content) try: #more", "import requests, BeautifulSoup session=requests.session() headers={'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90", "{\"id\":\"ListViewInner\"}).findAll('li',recursive=False): #parse element for title and sold price and when #store all prices", "cardListing in soup.find(\"ul\", {\"id\": \"GalleryViewInner\"}).findAll('li'): #parse element for title and sold price and", "mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"div\", {\"class\":\"gvprices\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=st[st.index('.')+3:] print price, type print except:", "Gecko) Chrome/54.0.2840.90 Safari/537.36'} styleCode='B39254'#'BB1826' r=session.get('http://www.ebay.com/sch/i.html?_nkw='+str(styleCode)+'&LH_Complete=1&LH_Sold=1',headers=headers) soup=BeautifulSoup.BeautifulSoup(r.content) try: #more than 48 listings - grid", "array for maths - min, max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"li\", {\"class\":\"lvprice prc\"})[0].text.split('$')[1]", "than 48 listings - grid view for cardListing in soup.find(\"ul\", {\"id\": \"GalleryViewInner\"}).findAll('li'): 
#parse", "requests, BeautifulSoup session=requests.session() headers={'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36'}", "st=cardListing.findAll(\"div\", {\"class\":\"gvprices\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=st[st.index('.')+3:] print price, type print except: #error with findAll -", "volatility print cardListing.find('h3').text st=cardListing.findAll(\"div\", {\"class\":\"gvprices\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=st[st.index('.')+3:] print price, type print except: #error", "like Gecko) Chrome/54.0.2840.90 Safari/537.36'} styleCode='B39254'#'BB1826' r=session.get('http://www.ebay.com/sch/i.html?_nkw='+str(styleCode)+'&LH_Complete=1&LH_Sold=1',headers=headers) soup=BeautifulSoup.BeautifulSoup(r.content) try: #more than 48 listings -", "#more than 48 listings - grid view for cardListing in soup.find(\"ul\", {\"id\": \"GalleryViewInner\"}).findAll('li'):", "element for title and sold price and when #store all prices in array", "print cardListing.find('h3').text st=cardListing.findAll(\"div\", {\"class\":\"gvprices\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=st[st.index('.')+3:] print price, type print except: #error with", "print price, type print except: #error with findAll - less than 48 items?", "{\"id\": \"GalleryViewInner\"}).findAll('li'): #parse element for title and sold price and when #store all", "with findAll - less than 48 items? 
for cardListing in soup.find(\"ul\", {\"id\":\"ListViewInner\"}).findAll('li',recursive=False): #parse", "\"GalleryViewInner\"}).findAll('li'): #parse element for title and sold price and when #store all prices", "(X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36'} styleCode='B39254'#'BB1826' r=session.get('http://www.ebay.com/sch/i.html?_nkw='+str(styleCode)+'&LH_Complete=1&LH_Sold=1',headers=headers) soup=BeautifulSoup.BeautifulSoup(r.content) try:", "mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"li\", {\"class\":\"lvprice prc\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=cardListing.findAll(\"li\", {\"class\":\"lvformat\"})[0].text print price, type", "maths - min, max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"div\", {\"class\":\"gvprices\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=st[st.index('.')+3:] print", "prices in array for maths - min, max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"li\",", "48 items? for cardListing in soup.find(\"ul\", {\"id\":\"ListViewInner\"}).findAll('li',recursive=False): #parse element for title and sold", "for cardListing in soup.find(\"ul\", {\"id\":\"ListViewInner\"}).findAll('li',recursive=False): #parse element for title and sold price and", "#store all prices in array for maths - min, max, mean, volatility print", "try: #more than 48 listings - grid view for cardListing in soup.find(\"ul\", {\"id\":", "- less than 48 items? for cardListing in soup.find(\"ul\", {\"id\":\"ListViewInner\"}).findAll('li',recursive=False): #parse element for", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36'} styleCode='B39254'#'BB1826' r=session.get('http://www.ebay.com/sch/i.html?_nkw='+str(styleCode)+'&LH_Complete=1&LH_Sold=1',headers=headers) soup=BeautifulSoup.BeautifulSoup(r.content) try: #more than 48", "than 48 items? 
for cardListing in soup.find(\"ul\", {\"id\":\"ListViewInner\"}).findAll('li',recursive=False): #parse element for title and", "type=st[st.index('.')+3:] print price, type print except: #error with findAll - less than 48", "and sold price and when #store all prices in array for maths -", "for title and sold price and when #store all prices in array for", "for cardListing in soup.find(\"ul\", {\"id\": \"GalleryViewInner\"}).findAll('li'): #parse element for title and sold price", "volatility print cardListing.find('h3').text st=cardListing.findAll(\"li\", {\"class\":\"lvprice prc\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=cardListing.findAll(\"li\", {\"class\":\"lvformat\"})[0].text print price, type print", "soup.find(\"ul\", {\"id\": \"GalleryViewInner\"}).findAll('li'): #parse element for title and sold price and when #store", "cardListing in soup.find(\"ul\", {\"id\":\"ListViewInner\"}).findAll('li',recursive=False): #parse element for title and sold price and when", "Safari/537.36'} styleCode='B39254'#'BB1826' r=session.get('http://www.ebay.com/sch/i.html?_nkw='+str(styleCode)+'&LH_Complete=1&LH_Sold=1',headers=headers) soup=BeautifulSoup.BeautifulSoup(r.content) try: #more than 48 listings - grid view for", "grid view for cardListing in soup.find(\"ul\", {\"id\": \"GalleryViewInner\"}).findAll('li'): #parse element for title and", "#parse element for title and sold price and when #store all prices in", "headers={'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36'} styleCode='B39254'#'BB1826' r=session.get('http://www.ebay.com/sch/i.html?_nkw='+str(styleCode)+'&LH_Complete=1&LH_Sold=1',headers=headers) soup=BeautifulSoup.BeautifulSoup(r.content)", "session=requests.session() headers={'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36'} styleCode='B39254'#'BB1826' 
r=session.get('http://www.ebay.com/sch/i.html?_nkw='+str(styleCode)+'&LH_Complete=1&LH_Sold=1',headers=headers)", "soup=BeautifulSoup.BeautifulSoup(r.content) try: #more than 48 listings - grid view for cardListing in soup.find(\"ul\",", "and when #store all prices in array for maths - min, max, mean,", "findAll - less than 48 items? for cardListing in soup.find(\"ul\", {\"id\":\"ListViewInner\"}).findAll('li',recursive=False): #parse element", "- min, max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"li\", {\"class\":\"lvprice prc\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=cardListing.findAll(\"li\", {\"class\":\"lvformat\"})[0].text", "print except: #error with findAll - less than 48 items? for cardListing in", "{\"class\":\"gvprices\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=st[st.index('.')+3:] print price, type print except: #error with findAll - less", "for maths - min, max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"div\", {\"class\":\"gvprices\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=st[st.index('.')+3:]", "x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36'} styleCode='B39254'#'BB1826' r=session.get('http://www.ebay.com/sch/i.html?_nkw='+str(styleCode)+'&LH_Complete=1&LH_Sold=1',headers=headers) soup=BeautifulSoup.BeautifulSoup(r.content) try: #more than", "<reponame>zweed4u/Analytics import requests, BeautifulSoup session=requests.session() headers={'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)", "max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"div\", {\"class\":\"gvprices\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=st[st.index('.')+3:] print price, type print", "in soup.find(\"ul\", {\"id\": \"GalleryViewInner\"}).findAll('li'): #parse element for title and sold price and when", "#error with findAll - less than 48 items? 
for cardListing in soup.find(\"ul\", {\"id\":\"ListViewInner\"}).findAll('li',recursive=False):", "cardListing.find('h3').text st=cardListing.findAll(\"div\", {\"class\":\"gvprices\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=st[st.index('.')+3:] print price, type print except: #error with findAll", "sold price and when #store all prices in array for maths - min,", "prices in array for maths - min, max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"div\",", "items? for cardListing in soup.find(\"ul\", {\"id\":\"ListViewInner\"}).findAll('li',recursive=False): #parse element for title and sold price", "- min, max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"div\", {\"class\":\"gvprices\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=st[st.index('.')+3:] print price,", "price=st[:st.index('.')+3] type=st[st.index('.')+3:] print price, type print except: #error with findAll - less than", "Chrome/54.0.2840.90 Safari/537.36'} styleCode='B39254'#'BB1826' r=session.get('http://www.ebay.com/sch/i.html?_nkw='+str(styleCode)+'&LH_Complete=1&LH_Sold=1',headers=headers) soup=BeautifulSoup.BeautifulSoup(r.content) try: #more than 48 listings - grid view", "except: #error with findAll - less than 48 items? for cardListing in soup.find(\"ul\",", "- grid view for cardListing in soup.find(\"ul\", {\"id\": \"GalleryViewInner\"}).findAll('li'): #parse element for title", "type print except: #error with findAll - less than 48 items? 
for cardListing", "48 listings - grid view for cardListing in soup.find(\"ul\", {\"id\": \"GalleryViewInner\"}).findAll('li'): #parse element", "min, max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"li\", {\"class\":\"lvprice prc\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=cardListing.findAll(\"li\", {\"class\":\"lvformat\"})[0].text print", "r=session.get('http://www.ebay.com/sch/i.html?_nkw='+str(styleCode)+'&LH_Complete=1&LH_Sold=1',headers=headers) soup=BeautifulSoup.BeautifulSoup(r.content) try: #more than 48 listings - grid view for cardListing in", "BeautifulSoup session=requests.session() headers={'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36'} styleCode='B39254'#'BB1826'", "view for cardListing in soup.find(\"ul\", {\"id\": \"GalleryViewInner\"}).findAll('li'): #parse element for title and sold", "in soup.find(\"ul\", {\"id\":\"ListViewInner\"}).findAll('li',recursive=False): #parse element for title and sold price and when #store", "min, max, mean, volatility print cardListing.find('h3').text st=cardListing.findAll(\"div\", {\"class\":\"gvprices\"})[0].text.split('$')[1] price=st[:st.index('.')+3] type=st[st.index('.')+3:] print price, type" ]
[ "Job.DoesNotExist: return None class Job(models.Model): objects = JobManager() label = models.CharField(max_length=60, default=\"default\") description", "generate_random_string from .validators import validate_file class CompanyManager(models.Manager): def get_or_none(self, **kwargs): try: return self.get(**kwargs)", "{ \"description\": \"Have you checked with candidate on the status of his advanced", "None class Company(models.Model): objects = CompanyManager() label = models.CharField(max_length=60, default=\"default\", unique=True) description =", "] user = models.ForeignKey('custom_user.User', on_delete=models.CASCADE, null=False, blank=False, related_name='user') recruiter = models.ForeignKey('custom_user.User', on_delete=models.SET_NULL, null=True,", "7 }, OFFERED: { \"description\": \"Have you checked with candidate whether he has", "default=\"default\") hiring_contact_email = models.EmailField(max_length=254) is_open = models.BooleanField(default=True) metadata = models.TextField() created = models.DateTimeField(auto_now_add=True)", "return f\"<Job Label: {self.label} / Company: {self.company.label}>\" class ApplicationManager(models.Manager): def get_or_none(self, **kwargs): try:", "models.ForeignKey('custom_user.User', on_delete=models.SET_NULL, null=True, blank=True, related_name='recruiter') email = models.EmailField(max_length=254) job = models.ForeignKey('job_applications.Job', on_delete=models.CASCADE, null=False,", "\"deadline\": 7 }, INTERVIEWING: { \"description\": \"Have you checked with candidate on the", "__repr__(self): return self.__str__() def __str__(self): return f\"<Application Username: {self.user.username} / JobId: {self.job.pk} /", "possible next stages to for the application to move into \"\"\" return Application.NEXT_STAGE[self.stage]", "[INTERVIEWING], INTERVIEWING: [ADVANCED_INTERVIEWING, REJECTED, OFFERED], ADVANCED_INTERVIEWING: [REJECTED, OFFERED], OFFERED: [HIRED, REJECTED], REJECTED: [],", "JobManager() label = 
models.CharField(max_length=60, default=\"default\") description = models.TextField(blank=True, null=True) company = models.ForeignKey('job_applications.Company', on_delete=models.SET_NULL,", "user = models.ForeignKey('custom_user.User', on_delete=models.CASCADE, null=False, blank=False, related_name='user') recruiter = models.ForeignKey('custom_user.User', on_delete=models.SET_NULL, null=True, blank=True,", "objects = ApplicationManager() PENDING = 1 REVIEWING = 2 SHORTLISTED = 3 INTERVIEWING", "= 1 REVIEWING = 2 SHORTLISTED = 3 INTERVIEWING = 4 ADVANCED_INTERVIEWING =", "= models.ForeignKey('job_applications.Company', on_delete=models.SET_NULL, null=True, blank=True) hiring_contact = models.CharField(max_length=60, default=\"default\") hiring_contact_email = models.EmailField(max_length=254) is_open", "STAGE_TO_TASKS = { PENDING: { \"description\": \"Checked with hiring manager whether he/she has", "/ Stage: {self.stage}>\" @property def possible_next_stages(self) -> list: \"\"\" retrieves the possible next", "return self.get(**kwargs) except Job.DoesNotExist: return None class Job(models.Model): objects = JobManager() label =", "}, REVIEWING: { \"description\": \"Have you waited a few days yet?\", \"deadline\": 7", "choices=categories, ) resume = models.FileField( upload_to=upload_to_file, validators=[validate_file], help_text=\"Please upload only PDF or docx", "(REVIEWING, \"Reviewing\"), (SHORTLISTED, \"Shortlisted\"), (INTERVIEWING, \"Interviewing\"), (ADVANCED_INTERVIEWING, \"Advanced Interviewing\"), (REJECTED, \"Rejected\"), (OFFERED, \"Offered\"),", "ApplicationManager() PENDING = 1 REVIEWING = 2 SHORTLISTED = 3 INTERVIEWING = 4", "import models from .utils import upload_to_file, generate_random_string from .validators import validate_file class CompanyManager(models.Manager):", "offer?\", \"deadline\": 7 } } categories = [ (PENDING, \"Pending\"), (REVIEWING, \"Reviewing\"), (SHORTLISTED,", "you waited a few days yet?\", \"deadline\": 7 }, SHORTLISTED: { 
\"description\": \"Have", "\"description\": \"Checked with hiring manager whether he/she has reviewed the application?\", \"deadline\": 7", "\"Have you checked with candidate whether he has gone for the interview?\", \"deadline\":", "for the interview?\", \"deadline\": 7 }, INTERVIEWING: { \"description\": \"Have you checked with", "from .validators import validate_file class CompanyManager(models.Manager): def get_or_none(self, **kwargs): try: return self.get(**kwargs) except", "return self.__str__() def __str__(self): return f\"<Company Label: {self.label} / Description: {self.description}>\" class JobManager(models.Manager):", "OFFERED], OFFERED: [HIRED, REJECTED], REJECTED: [], HIRED: [] } STAGE_TO_TASKS = { PENDING:", "status of his advanced interview?\", \"deadline\": 7 }, OFFERED: { \"description\": \"Have you", "{self.stage}>\" @property def possible_next_stages(self) -> list: \"\"\" retrieves the possible next stages to", "OFFERED: [HIRED, REJECTED], REJECTED: [], HIRED: [] } STAGE_TO_TASKS = { PENDING: {", "models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) def __repr__(self): return self.__str__() def __str__(self): return f\"<Application Username:", "[REJECTED], SHORTLISTED: [INTERVIEWING], INTERVIEWING: [ADVANCED_INTERVIEWING, REJECTED, OFFERED], ADVANCED_INTERVIEWING: [REJECTED, OFFERED], OFFERED: [HIRED, REJECTED],", "Description: {self.description}>\" class JobManager(models.Manager): def get_or_none(self, **kwargs): try: return self.get(**kwargs) except Job.DoesNotExist: return", "resume = models.FileField( upload_to=upload_to_file, validators=[validate_file], help_text=\"Please upload only PDF or docx files\", )", "}, ADVANCED_INTERVIEWING: { \"description\": \"Have you checked with candidate on the status of", "modified = models.DateTimeField(auto_now=True) def __repr__(self): return self.__str__() def __str__(self): return f\"<Application Username: {self.user.username}", "you checked with candidate whether he 
has gone for the interview?\", \"deadline\": 7", "he has taken up the offer?\", \"deadline\": 7 } } categories = [", "return None class Company(models.Model): objects = CompanyManager() label = models.CharField(max_length=60, default=\"default\", unique=True) description", "__str__(self): return f\"<Job Label: {self.label} / Company: {self.company.label}>\" class ApplicationManager(models.Manager): def get_or_none(self, **kwargs):", "def possible_next_stages(self) -> list: \"\"\" retrieves the possible next stages to for the", "\"Reviewing\"), (SHORTLISTED, \"Shortlisted\"), (INTERVIEWING, \"Interviewing\"), (ADVANCED_INTERVIEWING, \"Advanced Interviewing\"), (REJECTED, \"Rejected\"), (OFFERED, \"Offered\"), (HIRED,", "= { PENDING: [REVIEWING, SHORTLISTED], REVIEWING: [REJECTED], SHORTLISTED: [INTERVIEWING], INTERVIEWING: [ADVANCED_INTERVIEWING, REJECTED, OFFERED],", "\"description\": \"Have you checked with candidate on the status of his interview?\", \"deadline\":", "7 }, ADVANCED_INTERVIEWING: { \"description\": \"Have you checked with candidate on the status", "Username: {self.user.username} / JobId: {self.job.pk} / Stage: {self.stage}>\" @property def possible_next_stages(self) -> list:", "}, OFFERED: { \"description\": \"Have you checked with candidate whether he has taken", "7 }, INTERVIEWING: { \"description\": \"Have you checked with candidate on the status", "return None class Job(models.Model): objects = JobManager() label = models.CharField(max_length=60, default=\"default\") description =", "null=True, blank=True, related_name='recruiter') email = models.EmailField(max_length=254) job = models.ForeignKey('job_applications.Job', on_delete=models.CASCADE, null=False, blank=False) stage", "@property def possible_next_stages(self) -> list: \"\"\" retrieves the possible next stages to for", "= 7 HIRED = 8 NEXT_STAGE = { PENDING: [REVIEWING, SHORTLISTED], REVIEWING: [REJECTED],", "= ApplicationManager() PENDING = 1 REVIEWING = 2 SHORTLISTED = 3 INTERVIEWING 
class Company(models.Model):
    """A hiring company that job openings are posted under."""

    objects = CompanyManager()

    # Identity / descriptive fields. `label` doubles as a natural key.
    label = models.CharField(max_length=60, default="default", unique=True)
    description = models.TextField(blank=True, null=True)
    metadata = models.TextField()

    # Bookkeeping timestamps, maintained automatically by the ORM.
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __repr__(self):
        return str(self)

    def __str__(self):
        return f"<Company Label: {self.label} / Description: {self.description}>"
offer?\", \"deadline\": 7 } } categories =", "def get_or_none(self, **kwargs): try: return self.get(**kwargs) except Job.DoesNotExist: return None class Job(models.Model): objects", "upload only PDF or docx files\", ) created = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True)", "4 ADVANCED_INTERVIEWING = 5 REJECTED = 6 OFFERED = 7 HIRED = 8", "import upload_to_file, generate_random_string from .validators import validate_file class CompanyManager(models.Manager): def get_or_none(self, **kwargs): try:", "SHORTLISTED], REVIEWING: [REJECTED], SHORTLISTED: [INTERVIEWING], INTERVIEWING: [ADVANCED_INTERVIEWING, REJECTED, OFFERED], ADVANCED_INTERVIEWING: [REJECTED, OFFERED], OFFERED:", "= 3 INTERVIEWING = 4 ADVANCED_INTERVIEWING = 5 REJECTED = 6 OFFERED =", "2 SHORTLISTED = 3 INTERVIEWING = 4 ADVANCED_INTERVIEWING = 5 REJECTED = 6", "modified = models.DateTimeField(auto_now=True) def __repr__(self): return self.__str__() def __str__(self): return f\"<Job Label: {self.label}", "you checked with candidate on the status of his advanced interview?\", \"deadline\": 7", "yet?\", \"deadline\": 7 }, SHORTLISTED: { \"description\": \"Have you checked with candidate whether", "\"deadline\": 7 }, ADVANCED_INTERVIEWING: { \"description\": \"Have you checked with candidate on the", "OFFERED = 7 HIRED = 8 NEXT_STAGE = { PENDING: [REVIEWING, SHORTLISTED], REVIEWING:", "(PENDING, \"Pending\"), (REVIEWING, \"Reviewing\"), (SHORTLISTED, \"Shortlisted\"), (INTERVIEWING, \"Interviewing\"), (ADVANCED_INTERVIEWING, \"Advanced Interviewing\"), (REJECTED, \"Rejected\"),", "def __str__(self): return f\"<Company Label: {self.label} / Description: {self.description}>\" class JobManager(models.Manager): def get_or_none(self,", "INTERVIEWING: [ADVANCED_INTERVIEWING, REJECTED, OFFERED], ADVANCED_INTERVIEWING: [REJECTED, OFFERED], OFFERED: [HIRED, REJECTED], REJECTED: [], HIRED:", "class JobManager(models.Manager): def get_or_none(self, **kwargs): try: return 
self.get(**kwargs) except Job.DoesNotExist: return None class", "on_delete=models.SET_NULL, null=True, blank=True, related_name='recruiter') email = models.EmailField(max_length=254) job = models.ForeignKey('job_applications.Job', on_delete=models.CASCADE, null=False, blank=False)", "models.ForeignKey('custom_user.User', on_delete=models.CASCADE, null=False, blank=False, related_name='user') recruiter = models.ForeignKey('custom_user.User', on_delete=models.SET_NULL, null=True, blank=True, related_name='recruiter') email", "models.DateTimeField(auto_now=True) def __repr__(self): return self.__str__() def __str__(self): return f\"<Job Label: {self.label} / Company:", "def __str__(self): return f\"<Job Label: {self.label} / Company: {self.company.label}>\" class ApplicationManager(models.Manager): def get_or_none(self,", "\"Shortlisted\"), (INTERVIEWING, \"Interviewing\"), (ADVANCED_INTERVIEWING, \"Advanced Interviewing\"), (REJECTED, \"Rejected\"), (OFFERED, \"Offered\"), (HIRED, \"Hired\"), ]", "docx files\", ) created = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) def __repr__(self): return self.__str__()", "REJECTED], REJECTED: [], HIRED: [] } STAGE_TO_TASKS = { PENDING: { \"description\": \"Checked", "Interviewing\"), (REJECTED, \"Rejected\"), (OFFERED, \"Offered\"), (HIRED, \"Hired\"), ] user = models.ForeignKey('custom_user.User', on_delete=models.CASCADE, null=False,", "try: return self.get(**kwargs) except Company.DoesNotExist: return None class Company(models.Model): objects = CompanyManager() label", "__str__(self): return f\"<Company Label: {self.label} / Description: {self.description}>\" class JobManager(models.Manager): def get_or_none(self, **kwargs):", "INTERVIEWING = 4 ADVANCED_INTERVIEWING = 5 REJECTED = 6 OFFERED = 7 HIRED", "{ PENDING: [REVIEWING, SHORTLISTED], REVIEWING: [REJECTED], SHORTLISTED: [INTERVIEWING], INTERVIEWING: [ADVANCED_INTERVIEWING, REJECTED, OFFERED], ADVANCED_INTERVIEWING:", "class 
class Job(models.Model):
    """A job opening, optionally linked to a Company."""

    objects = JobManager()

    label = models.CharField(max_length=60, default="default")
    description = models.TextField(blank=True, null=True)
    # NOTE: on_delete=SET_NULL means self.company becomes None when the
    # linked Company row is deleted — every dereference must guard for it.
    company = models.ForeignKey('job_applications.Company', on_delete=models.SET_NULL, null=True, blank=True)
    hiring_contact = models.CharField(max_length=60, default="default")
    hiring_contact_email = models.EmailField(max_length=254)
    is_open = models.BooleanField(default=True)
    metadata = models.TextField()

    # Bookkeeping timestamps, maintained automatically by the ORM.
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        # BUG FIX: the original read self.company.label unconditionally,
        # which raised AttributeError for jobs whose company was deleted
        # (company is nullable via SET_NULL). Output is unchanged whenever
        # a company is present.
        company_label = self.company.label if self.company else None
        return f"<Job Label: {self.label} / Company: {company_label}>"
class ApplicationManager(models.Manager):
    """Manager for Application adding a non-raising single-object lookup."""

    def get_or_none(self, **kwargs):
        """Return the matching Application, or None when no row matches.

        Only Application.DoesNotExist is swallowed; other exceptions
        propagate unchanged.
        """
        try:
            found = self.get(**kwargs)
        except Application.DoesNotExist:
            found = None
        return found
class Application(models.Model):
    """A candidate's application to a Job, tracked through hiring stages."""

    objects = ApplicationManager()

    # Stage constants — the integer values are persisted in `stage`,
    # so they must never be renumbered.
    PENDING = 1
    REVIEWING = 2
    SHORTLISTED = 3
    INTERVIEWING = 4
    ADVANCED_INTERVIEWING = 5
    REJECTED = 6
    OFFERED = 7
    HIRED = 8

    # Allowed stage transitions; REJECTED and HIRED are terminal.
    NEXT_STAGE = {
        PENDING: [REVIEWING, SHORTLISTED],
        REVIEWING: [REJECTED],
        SHORTLISTED: [INTERVIEWING],
        INTERVIEWING: [ADVANCED_INTERVIEWING, REJECTED, OFFERED],
        ADVANCED_INTERVIEWING: [REJECTED, OFFERED],
        OFFERED: [HIRED, REJECTED],
        REJECTED: [],
        HIRED: [],
    }

    # Follow-up task generated when an application sits in a stage;
    # terminal stages (REJECTED, HIRED) intentionally have no entry.
    STAGE_TO_TASKS = {
        PENDING: {
            "description": "Checked with hiring manager whether he/she has reviewed the application?",
            "deadline": 7,
        },
        REVIEWING: {
            "description": "Have you waited a few days yet?",
            "deadline": 7,
        },
        SHORTLISTED: {
            "description": "Have you checked with candidate whether he has gone for the interview?",
            "deadline": 7,
        },
        INTERVIEWING: {
            "description": "Have you checked with candidate on the status of his interview?",
            "deadline": 7,
        },
        ADVANCED_INTERVIEWING: {
            "description": "Have you checked with candidate on the status of his advanced interview?",
            "deadline": 7,
        },
        OFFERED: {
            "description": "Have you checked with candidate whether he has taken up the offer?",
            "deadline": 7,
        },
    }

    # Display choices for the `stage` field (referenced below by name —
    # do not rename without a matching migration review).
    categories = [
        (PENDING, "Pending"),
        (REVIEWING, "Reviewing"),
        (SHORTLISTED, "Shortlisted"),
        (INTERVIEWING, "Interviewing"),
        (ADVANCED_INTERVIEWING, "Advanced Interviewing"),
        (REJECTED, "Rejected"),
        (OFFERED, "Offered"),
        (HIRED, "Hired"),
    ]

    # The applicant (required) and the recruiter handling the case
    # (optional; nulled out if the recruiter account is deleted).
    user = models.ForeignKey('custom_user.User', on_delete=models.CASCADE, null=False, blank=False, related_name='user')
    recruiter = models.ForeignKey('custom_user.User', on_delete=models.SET_NULL, null=True, blank=True, related_name='recruiter')
    email = models.EmailField(max_length=254)
    job = models.ForeignKey('job_applications.Job', on_delete=models.CASCADE, null=False, blank=False)
    stage = models.IntegerField(
        choices=categories,
    )
    resume = models.FileField(
        upload_to=upload_to_file,
        validators=[validate_file],
        help_text="Please upload only PDF or docx files",
    )
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __repr__(self):
        return str(self)

    def __str__(self):
        return f"<Application Username: {self.user.username} / JobId: {self.job.pk} / Stage: {self.stage}>"

    @property
    def possible_next_stages(self) -> list:
        """Return the stages this application may move into next."""
        # NOTE(review): the tail of this property was truncated in the
        # reviewed view; the NEXT_STAGE lookup below matches the documented
        # contract and the transition table above — confirm against VCS.
        return self.NEXT_STAGE[self.stage]
7 }, SHORTLISTED: {", "**kwargs): try: return self.get(**kwargs) except Job.DoesNotExist: return None class Job(models.Model): objects = JobManager()", "\"Interviewing\"), (ADVANCED_INTERVIEWING, \"Advanced Interviewing\"), (REJECTED, \"Rejected\"), (OFFERED, \"Offered\"), (HIRED, \"Hired\"), ] user =", "= { PENDING: { \"description\": \"Checked with hiring manager whether he/she has reviewed", "PENDING: [REVIEWING, SHORTLISTED], REVIEWING: [REJECTED], SHORTLISTED: [INTERVIEWING], INTERVIEWING: [ADVANCED_INTERVIEWING, REJECTED, OFFERED], ADVANCED_INTERVIEWING: [REJECTED,", "models from .utils import upload_to_file, generate_random_string from .validators import validate_file class CompanyManager(models.Manager): def", "INTERVIEWING: { \"description\": \"Have you checked with candidate on the status of his", "Company.DoesNotExist: return None class Company(models.Model): objects = CompanyManager() label = models.CharField(max_length=60, default=\"default\", unique=True)", "null=True) metadata = models.TextField() created = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) def __repr__(self): return", "objects = JobManager() label = models.CharField(max_length=60, default=\"default\") description = models.TextField(blank=True, null=True) company =", "\"Hired\"), ] user = models.ForeignKey('custom_user.User', on_delete=models.CASCADE, null=False, blank=False, related_name='user') recruiter = models.ForeignKey('custom_user.User', on_delete=models.SET_NULL,", "PENDING = 1 REVIEWING = 2 SHORTLISTED = 3 INTERVIEWING = 4 ADVANCED_INTERVIEWING", "models.TextField(blank=True, null=True) metadata = models.TextField() created = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) def __repr__(self):", "class ApplicationManager(models.Manager): def get_or_none(self, **kwargs): try: return self.get(**kwargs) except Application.DoesNotExist: return None class", "get_or_none(self, **kwargs): try: return 
self.get(**kwargs) except Application.DoesNotExist: return None class Application(models.Model): objects =", "= [ (PENDING, \"Pending\"), (REVIEWING, \"Reviewing\"), (SHORTLISTED, \"Shortlisted\"), (INTERVIEWING, \"Interviewing\"), (ADVANCED_INTERVIEWING, \"Advanced Interviewing\"),", "has reviewed the application?\", \"deadline\": 7 }, REVIEWING: { \"description\": \"Have you waited", "models.EmailField(max_length=254) job = models.ForeignKey('job_applications.Job', on_delete=models.CASCADE, null=False, blank=False) stage = models.IntegerField( choices=categories, ) resume", ") created = models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) def __repr__(self): return self.__str__() def __str__(self):", "= models.DateTimeField(auto_now_add=True) modified = models.DateTimeField(auto_now=True) def __repr__(self): return self.__str__() def __str__(self): return f\"<Company", "self.get(**kwargs) except Application.DoesNotExist: return None class Application(models.Model): objects = ApplicationManager() PENDING = 1", "self.get(**kwargs) except Company.DoesNotExist: return None class Company(models.Model): objects = CompanyManager() label = models.CharField(max_length=60,", "hiring manager whether he/she has reviewed the application?\", \"deadline\": 7 }, REVIEWING: {", "try: return self.get(**kwargs) except Job.DoesNotExist: return None class Job(models.Model): objects = JobManager() label", "Company: {self.company.label}>\" class ApplicationManager(models.Manager): def get_or_none(self, **kwargs): try: return self.get(**kwargs) except Application.DoesNotExist: return", "has gone for the interview?\", \"deadline\": 7 }, INTERVIEWING: { \"description\": \"Have you", "SHORTLISTED = 3 INTERVIEWING = 4 ADVANCED_INTERVIEWING = 5 REJECTED = 6 OFFERED", "up the offer?\", \"deadline\": 7 } } categories = [ (PENDING, \"Pending\"), (REVIEWING,", "**kwargs): try: return self.get(**kwargs) except Application.DoesNotExist: return None class 
Application(models.Model): objects = ApplicationManager()", "the status of his interview?\", \"deadline\": 7 }, ADVANCED_INTERVIEWING: { \"description\": \"Have you", "**kwargs): try: return self.get(**kwargs) except Company.DoesNotExist: return None class Company(models.Model): objects = CompanyManager()", "def get_or_none(self, **kwargs): try: return self.get(**kwargs) except Application.DoesNotExist: return None class Application(models.Model): objects", "\"Advanced Interviewing\"), (REJECTED, \"Rejected\"), (OFFERED, \"Offered\"), (HIRED, \"Hired\"), ] user = models.ForeignKey('custom_user.User', on_delete=models.CASCADE,", "(OFFERED, \"Offered\"), (HIRED, \"Hired\"), ] user = models.ForeignKey('custom_user.User', on_delete=models.CASCADE, null=False, blank=False, related_name='user') recruiter", "= 2 SHORTLISTED = 3 INTERVIEWING = 4 ADVANCED_INTERVIEWING = 5 REJECTED =" ]
[ "from .dataloader_kitticaltech import load_data as load_kitticaltech def load_data(dataname,batch_size, val_batch_size, data_root, require_back=False, pre_seq_length=None, aft_seq_length=None):", "val_batch_size, data_root, pre_seq_length, aft_seq_length) elif dataname == 'kitticaltech': return load_kitticaltech(batch_size, val_batch_size, data_root, pre_seq_length,", "data_root, pre_seq_length, aft_seq_length) elif dataname == 'kitticaltech': return load_kitticaltech(batch_size, val_batch_size, data_root, pre_seq_length, aft_seq_length)", "from .dataloader_traffic import load_data as load_BJ from .dataloader_human import load_data as load_human from", "return load_mmnist(batch_size, val_batch_size, data_root, require_back) elif dataname == 'kth': return load_kth(batch_size, val_batch_size, data_root,", "elif dataname == 'human': return load_human(batch_size, val_batch_size, data_root, require_back) elif dataname == 'mmnist':", "import load_data as load_kth from .dataloader_kitticaltech import load_data as load_kitticaltech def load_data(dataname,batch_size, val_batch_size,", "load_human(batch_size, val_batch_size, data_root, require_back) elif dataname == 'mmnist': return load_mmnist(batch_size, val_batch_size, data_root, require_back)", "require_back) elif dataname == 'kth': return load_kth(batch_size, val_batch_size, data_root, pre_seq_length, aft_seq_length) elif dataname", "val_batch_size, data_root, require_back) elif dataname == 'kth': return load_kth(batch_size, val_batch_size, data_root, pre_seq_length, aft_seq_length)", "== 'traffic': return load_BJ(batch_size, val_batch_size, data_root, require_back) elif dataname == 'human': return load_human(batch_size,", "val_batch_size, data_root, require_back) elif dataname == 'mmnist': return load_mmnist(batch_size, val_batch_size, data_root, require_back) elif", "import load_data as load_mmnist from .dataloader_kth import load_data as load_kth from .dataloader_kitticaltech import", "require_back) elif dataname 
== 'human': return load_human(batch_size, val_batch_size, data_root, require_back) elif dataname ==", "load_BJ from .dataloader_human import load_data as load_human from .dataloader_moving_mnist import load_data as load_mmnist", "load_data(dataname,batch_size, val_batch_size, data_root, require_back=False, pre_seq_length=None, aft_seq_length=None): if dataname == 'traffic': return load_BJ(batch_size, val_batch_size,", "== 'mmnist': return load_mmnist(batch_size, val_batch_size, data_root, require_back) elif dataname == 'kth': return load_kth(batch_size,", "return load_kth(batch_size, val_batch_size, data_root, pre_seq_length, aft_seq_length) elif dataname == 'kitticaltech': return load_kitticaltech(batch_size, val_batch_size,", "load_kth from .dataloader_kitticaltech import load_data as load_kitticaltech def load_data(dataname,batch_size, val_batch_size, data_root, require_back=False, pre_seq_length=None,", "as load_kth from .dataloader_kitticaltech import load_data as load_kitticaltech def load_data(dataname,batch_size, val_batch_size, data_root, require_back=False,", "dataname == 'traffic': return load_BJ(batch_size, val_batch_size, data_root, require_back) elif dataname == 'human': return", "== 'human': return load_human(batch_size, val_batch_size, data_root, require_back) elif dataname == 'mmnist': return load_mmnist(batch_size,", "aft_seq_length=None): if dataname == 'traffic': return load_BJ(batch_size, val_batch_size, data_root, require_back) elif dataname ==", "import load_data as load_BJ from .dataloader_human import load_data as load_human from .dataloader_moving_mnist import", "as load_mmnist from .dataloader_kth import load_data as load_kth from .dataloader_kitticaltech import load_data as", "== 'kth': return load_kth(batch_size, val_batch_size, data_root, pre_seq_length, aft_seq_length) elif dataname == 'kitticaltech': return", "load_data as load_mmnist from .dataloader_kth import load_data as load_kth from .dataloader_kitticaltech import load_data", 
"dataname == 'human': return load_human(batch_size, val_batch_size, data_root, require_back) elif dataname == 'mmnist': return", "require_back) elif dataname == 'mmnist': return load_mmnist(batch_size, val_batch_size, data_root, require_back) elif dataname ==", "return load_BJ(batch_size, val_batch_size, data_root, require_back) elif dataname == 'human': return load_human(batch_size, val_batch_size, data_root,", "load_data as load_BJ from .dataloader_human import load_data as load_human from .dataloader_moving_mnist import load_data", "data_root, require_back) elif dataname == 'mmnist': return load_mmnist(batch_size, val_batch_size, data_root, require_back) elif dataname", "as load_BJ from .dataloader_human import load_data as load_human from .dataloader_moving_mnist import load_data as", "if dataname == 'traffic': return load_BJ(batch_size, val_batch_size, data_root, require_back) elif dataname == 'human':", "load_BJ(batch_size, val_batch_size, data_root, require_back) elif dataname == 'human': return load_human(batch_size, val_batch_size, data_root, require_back)", "'kth': return load_kth(batch_size, val_batch_size, data_root, pre_seq_length, aft_seq_length) elif dataname == 'kitticaltech': return load_kitticaltech(batch_size,", "import load_data as load_human from .dataloader_moving_mnist import load_data as load_mmnist from .dataloader_kth import", "elif dataname == 'mmnist': return load_mmnist(batch_size, val_batch_size, data_root, require_back) elif dataname == 'kth':", "load_data as load_kitticaltech def load_data(dataname,batch_size, val_batch_size, data_root, require_back=False, pre_seq_length=None, aft_seq_length=None): if dataname ==", "val_batch_size, data_root, require_back) elif dataname == 'human': return load_human(batch_size, val_batch_size, data_root, require_back) elif", "as load_human from .dataloader_moving_mnist import load_data as load_mmnist from .dataloader_kth import load_data as", "load_mmnist from .dataloader_kth import load_data as 
load_kth from .dataloader_kitticaltech import load_data as load_kitticaltech", ".dataloader_kth import load_data as load_kth from .dataloader_kitticaltech import load_data as load_kitticaltech def load_data(dataname,batch_size,", "import load_data as load_kitticaltech def load_data(dataname,batch_size, val_batch_size, data_root, require_back=False, pre_seq_length=None, aft_seq_length=None): if dataname", "load_human from .dataloader_moving_mnist import load_data as load_mmnist from .dataloader_kth import load_data as load_kth", "require_back=False, pre_seq_length=None, aft_seq_length=None): if dataname == 'traffic': return load_BJ(batch_size, val_batch_size, data_root, require_back) elif", "'human': return load_human(batch_size, val_batch_size, data_root, require_back) elif dataname == 'mmnist': return load_mmnist(batch_size, val_batch_size,", "load_kitticaltech def load_data(dataname,batch_size, val_batch_size, data_root, require_back=False, pre_seq_length=None, aft_seq_length=None): if dataname == 'traffic': return", "data_root, require_back) elif dataname == 'human': return load_human(batch_size, val_batch_size, data_root, require_back) elif dataname", "dataname == 'mmnist': return load_mmnist(batch_size, val_batch_size, data_root, require_back) elif dataname == 'kth': return", "elif dataname == 'kth': return load_kth(batch_size, val_batch_size, data_root, pre_seq_length, aft_seq_length) elif dataname ==", "load_kth(batch_size, val_batch_size, data_root, pre_seq_length, aft_seq_length) elif dataname == 'kitticaltech': return load_kitticaltech(batch_size, val_batch_size, data_root,", "'mmnist': return load_mmnist(batch_size, val_batch_size, data_root, require_back) elif dataname == 'kth': return load_kth(batch_size, val_batch_size,", "from .dataloader_moving_mnist import load_data as load_mmnist from .dataloader_kth import load_data as load_kth from", "data_root, require_back=False, pre_seq_length=None, aft_seq_length=None): if dataname == 'traffic': return 
load_BJ(batch_size, val_batch_size, data_root, require_back)", "val_batch_size, data_root, require_back=False, pre_seq_length=None, aft_seq_length=None): if dataname == 'traffic': return load_BJ(batch_size, val_batch_size, data_root,", "load_data as load_kth from .dataloader_kitticaltech import load_data as load_kitticaltech def load_data(dataname,batch_size, val_batch_size, data_root,", "pre_seq_length=None, aft_seq_length=None): if dataname == 'traffic': return load_BJ(batch_size, val_batch_size, data_root, require_back) elif dataname", ".dataloader_traffic import load_data as load_BJ from .dataloader_human import load_data as load_human from .dataloader_moving_mnist", ".dataloader_moving_mnist import load_data as load_mmnist from .dataloader_kth import load_data as load_kth from .dataloader_kitticaltech", "load_data as load_human from .dataloader_moving_mnist import load_data as load_mmnist from .dataloader_kth import load_data", "from .dataloader_kth import load_data as load_kth from .dataloader_kitticaltech import load_data as load_kitticaltech def", "return load_human(batch_size, val_batch_size, data_root, require_back) elif dataname == 'mmnist': return load_mmnist(batch_size, val_batch_size, data_root,", "dataname == 'kth': return load_kth(batch_size, val_batch_size, data_root, pre_seq_length, aft_seq_length) elif dataname == 'kitticaltech':", ".dataloader_human import load_data as load_human from .dataloader_moving_mnist import load_data as load_mmnist from .dataloader_kth", "from .dataloader_human import load_data as load_human from .dataloader_moving_mnist import load_data as load_mmnist from", "def load_data(dataname,batch_size, val_batch_size, data_root, require_back=False, pre_seq_length=None, aft_seq_length=None): if dataname == 'traffic': return load_BJ(batch_size,", "data_root, require_back) elif dataname == 'kth': return load_kth(batch_size, val_batch_size, data_root, pre_seq_length, aft_seq_length) elif", "'traffic': return load_BJ(batch_size, 
val_batch_size, data_root, require_back) elif dataname == 'human': return load_human(batch_size, val_batch_size,", "as load_kitticaltech def load_data(dataname,batch_size, val_batch_size, data_root, require_back=False, pre_seq_length=None, aft_seq_length=None): if dataname == 'traffic':", "load_mmnist(batch_size, val_batch_size, data_root, require_back) elif dataname == 'kth': return load_kth(batch_size, val_batch_size, data_root, pre_seq_length,", ".dataloader_kitticaltech import load_data as load_kitticaltech def load_data(dataname,batch_size, val_batch_size, data_root, require_back=False, pre_seq_length=None, aft_seq_length=None): if" ]
[ "of days. Defaults to 30. Returns: True if no keys are soon to", "import ( DAYS_WARNING_FOR_KEY_EXPIRATION, add_trusted_keys_to_gpg_home_dir, get_days_until_expiry, ) from ..utils import get_temporary_directory logger = logging.getLogger(__name__)", "logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def check_gpg_key_expiry( days_warning_for_key_expiration: int = DAYS_WARNING_FOR_KEY_EXPIRATION, ) -> bool:", "@click.command() @click.argument(\"days_before_warning\", required=False) def main(days_before_warning) -> None: \"\"\"Log info about when GPG keys", "= \"UPDATE KEY ASAP!!!!\" else: action_message = \"OK for now, but stay tuned\"", "from ..utils import get_temporary_directory logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def check_gpg_key_expiry( days_warning_for_key_expiration: int =", "= False action_message = \"UPDATE KEY ASAP!!!!\" else: action_message = \"OK for now,", "\"\"\"Check key expirations Args: days_warning_for_key_expiration: warn if a key expires within this number", "Copyright 2020-present Kensho Technologies, LLC. import logging import sys import click import gpg", "= \"KEY IS EXPIRED!\" elif days_to_expiry < days_warning_for_key_expiration: no_keys_close_to_expiry = False action_message =", "this number of days. Defaults to 30. Returns: True if no keys are", "are soon to expire or already expired, False otherwise \"\"\" with get_temporary_directory() as", "< 0: no_keys_close_to_expiry = False action_message = \"KEY IS EXPIRED!\" elif days_to_expiry <", "(FPR: %s) expires in %s days. %s\", fpr, days_to_expiry, action_message ) return no_keys_close_to_expiry", "%s) expires in %s days. 
%s\", fpr, days_to_expiry, action_message ) return no_keys_close_to_expiry @click.command()", "-> None: \"\"\"Log info about when GPG keys will expire\"\"\" no_keys_close_to_expiry = check_gpg_key_expiry(days_before_warning)", "add_trusted_keys_to_gpg_home_dir(gpg_homedir) with gpg.Context(home_dir=gpg_homedir) as ctx: fpr_to_expiry = get_days_until_expiry(ctx) no_keys_close_to_expiry = True for fpr,", "expires within this number of days. Defaults to 30. Returns: True if no", "sys import click import gpg from ..signing import ( DAYS_WARNING_FOR_KEY_EXPIRATION, add_trusted_keys_to_gpg_home_dir, get_days_until_expiry, )", "else: action_message = \"OK for now, but stay tuned\" logger.info( \"Key (FPR: %s)", "or already expired, False otherwise \"\"\" with get_temporary_directory() as gpg_homedir: add_trusted_keys_to_gpg_home_dir(gpg_homedir) with gpg.Context(home_dir=gpg_homedir)", "get_temporary_directory() as gpg_homedir: add_trusted_keys_to_gpg_home_dir(gpg_homedir) with gpg.Context(home_dir=gpg_homedir) as ctx: fpr_to_expiry = get_days_until_expiry(ctx) no_keys_close_to_expiry =", "IS EXPIRED!\" elif days_to_expiry < days_warning_for_key_expiration: no_keys_close_to_expiry = False action_message = \"UPDATE KEY", "= get_days_until_expiry(ctx) no_keys_close_to_expiry = True for fpr, days_to_expiry in fpr_to_expiry.items(): if days_to_expiry <", "stay tuned\" logger.info( \"Key (FPR: %s) expires in %s days. 
%s\", fpr, days_to_expiry,", "False action_message = \"KEY IS EXPIRED!\" elif days_to_expiry < days_warning_for_key_expiration: no_keys_close_to_expiry = False", "import sys import click import gpg from ..signing import ( DAYS_WARNING_FOR_KEY_EXPIRATION, add_trusted_keys_to_gpg_home_dir, get_days_until_expiry,", "keys are soon to expire or already expired, False otherwise \"\"\" with get_temporary_directory()", "\"KEY IS EXPIRED!\" elif days_to_expiry < days_warning_for_key_expiration: no_keys_close_to_expiry = False action_message = \"UPDATE", "import click import gpg from ..signing import ( DAYS_WARNING_FOR_KEY_EXPIRATION, add_trusted_keys_to_gpg_home_dir, get_days_until_expiry, ) from", "get_days_until_expiry, ) from ..utils import get_temporary_directory logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def check_gpg_key_expiry( days_warning_for_key_expiration:", "a key expires within this number of days. Defaults to 30. Returns: True", "action_message = \"OK for now, but stay tuned\" logger.info( \"Key (FPR: %s) expires", "if no keys are soon to expire or already expired, False otherwise \"\"\"", "gpg from ..signing import ( DAYS_WARNING_FOR_KEY_EXPIRATION, add_trusted_keys_to_gpg_home_dir, get_days_until_expiry, ) from ..utils import get_temporary_directory", "get_days_until_expiry(ctx) no_keys_close_to_expiry = True for fpr, days_to_expiry in fpr_to_expiry.items(): if days_to_expiry < 0:", "logging.getLogger(__name__) logger.setLevel(logging.INFO) def check_gpg_key_expiry( days_warning_for_key_expiration: int = DAYS_WARNING_FOR_KEY_EXPIRATION, ) -> bool: \"\"\"Check key", "False otherwise \"\"\" with get_temporary_directory() as gpg_homedir: add_trusted_keys_to_gpg_home_dir(gpg_homedir) with gpg.Context(home_dir=gpg_homedir) as ctx: fpr_to_expiry", "otherwise \"\"\" with get_temporary_directory() as gpg_homedir: add_trusted_keys_to_gpg_home_dir(gpg_homedir) with gpg.Context(home_dir=gpg_homedir) as ctx: fpr_to_expiry =", "= \"OK for now, but 
stay tuned\" logger.info( \"Key (FPR: %s) expires in", "0: no_keys_close_to_expiry = False action_message = \"KEY IS EXPIRED!\" elif days_to_expiry < days_warning_for_key_expiration:", ") -> bool: \"\"\"Check key expirations Args: days_warning_for_key_expiration: warn if a key expires", "..utils import get_temporary_directory logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def check_gpg_key_expiry( days_warning_for_key_expiration: int = DAYS_WARNING_FOR_KEY_EXPIRATION,", "main(days_before_warning) -> None: \"\"\"Log info about when GPG keys will expire\"\"\" no_keys_close_to_expiry =", "bool: \"\"\"Check key expirations Args: days_warning_for_key_expiration: warn if a key expires within this", "= DAYS_WARNING_FOR_KEY_EXPIRATION, ) -> bool: \"\"\"Check key expirations Args: days_warning_for_key_expiration: warn if a", "= True for fpr, days_to_expiry in fpr_to_expiry.items(): if days_to_expiry < 0: no_keys_close_to_expiry =", "\"UPDATE KEY ASAP!!!!\" else: action_message = \"OK for now, but stay tuned\" logger.info(", "no_keys_close_to_expiry = False action_message = \"UPDATE KEY ASAP!!!!\" else: action_message = \"OK for", "number of days. Defaults to 30. Returns: True if no keys are soon", "elif days_to_expiry < days_warning_for_key_expiration: no_keys_close_to_expiry = False action_message = \"UPDATE KEY ASAP!!!!\" else:", "days_warning_for_key_expiration: warn if a key expires within this number of days. Defaults to", "days. 
%s\", fpr, days_to_expiry, action_message ) return no_keys_close_to_expiry @click.command() @click.argument(\"days_before_warning\", required=False) def main(days_before_warning)", "info about when GPG keys will expire\"\"\" no_keys_close_to_expiry = check_gpg_key_expiry(days_before_warning) if no_keys_close_to_expiry: sys.exit(0)", "check_gpg_key_expiry( days_warning_for_key_expiration: int = DAYS_WARNING_FOR_KEY_EXPIRATION, ) -> bool: \"\"\"Check key expirations Args: days_warning_for_key_expiration:", "expire or already expired, False otherwise \"\"\" with get_temporary_directory() as gpg_homedir: add_trusted_keys_to_gpg_home_dir(gpg_homedir) with", "now, but stay tuned\" logger.info( \"Key (FPR: %s) expires in %s days. %s\",", "in %s days. %s\", fpr, days_to_expiry, action_message ) return no_keys_close_to_expiry @click.command() @click.argument(\"days_before_warning\", required=False)", "no_keys_close_to_expiry = True for fpr, days_to_expiry in fpr_to_expiry.items(): if days_to_expiry < 0: no_keys_close_to_expiry", "tuned\" logger.info( \"Key (FPR: %s) expires in %s days. %s\", fpr, days_to_expiry, action_message", "False action_message = \"UPDATE KEY ASAP!!!!\" else: action_message = \"OK for now, but", "days_to_expiry in fpr_to_expiry.items(): if days_to_expiry < 0: no_keys_close_to_expiry = False action_message = \"KEY", "( DAYS_WARNING_FOR_KEY_EXPIRATION, add_trusted_keys_to_gpg_home_dir, get_days_until_expiry, ) from ..utils import get_temporary_directory logger = logging.getLogger(__name__) logger.setLevel(logging.INFO)", "return no_keys_close_to_expiry @click.command() @click.argument(\"days_before_warning\", required=False) def main(days_before_warning) -> None: \"\"\"Log info about when", "fpr, days_to_expiry in fpr_to_expiry.items(): if days_to_expiry < 0: no_keys_close_to_expiry = False action_message =", "int = DAYS_WARNING_FOR_KEY_EXPIRATION, ) -> bool: \"\"\"Check key expirations Args: days_warning_for_key_expiration: warn if", "Defaults to 30. 
Returns: True if no keys are soon to expire or", "required=False) def main(days_before_warning) -> None: \"\"\"Log info about when GPG keys will expire\"\"\"", "if a key expires within this number of days. Defaults to 30. Returns:", "Returns: True if no keys are soon to expire or already expired, False", "import gpg from ..signing import ( DAYS_WARNING_FOR_KEY_EXPIRATION, add_trusted_keys_to_gpg_home_dir, get_days_until_expiry, ) from ..utils import", "= False action_message = \"KEY IS EXPIRED!\" elif days_to_expiry < days_warning_for_key_expiration: no_keys_close_to_expiry =", "\"\"\"Log info about when GPG keys will expire\"\"\" no_keys_close_to_expiry = check_gpg_key_expiry(days_before_warning) if no_keys_close_to_expiry:", "fpr, days_to_expiry, action_message ) return no_keys_close_to_expiry @click.command() @click.argument(\"days_before_warning\", required=False) def main(days_before_warning) -> None:", "True if no keys are soon to expire or already expired, False otherwise", "gpg_homedir: add_trusted_keys_to_gpg_home_dir(gpg_homedir) with gpg.Context(home_dir=gpg_homedir) as ctx: fpr_to_expiry = get_days_until_expiry(ctx) no_keys_close_to_expiry = True for", "DAYS_WARNING_FOR_KEY_EXPIRATION, add_trusted_keys_to_gpg_home_dir, get_days_until_expiry, ) from ..utils import get_temporary_directory logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def", "for fpr, days_to_expiry in fpr_to_expiry.items(): if days_to_expiry < 0: no_keys_close_to_expiry = False action_message", ") from ..utils import get_temporary_directory logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def check_gpg_key_expiry( days_warning_for_key_expiration: int", "True for fpr, days_to_expiry in fpr_to_expiry.items(): if days_to_expiry < 0: no_keys_close_to_expiry = False", "expirations Args: days_warning_for_key_expiration: warn if a key expires within this number of days.", "action_message = \"UPDATE KEY ASAP!!!!\" else: action_message = \"OK for now, but 
stay", "import get_temporary_directory logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def check_gpg_key_expiry( days_warning_for_key_expiration: int = DAYS_WARNING_FOR_KEY_EXPIRATION, )", "2020-present Kensho Technologies, LLC. import logging import sys import click import gpg from", "get_temporary_directory logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def check_gpg_key_expiry( days_warning_for_key_expiration: int = DAYS_WARNING_FOR_KEY_EXPIRATION, ) ->", "DAYS_WARNING_FOR_KEY_EXPIRATION, ) -> bool: \"\"\"Check key expirations Args: days_warning_for_key_expiration: warn if a key", "key expires within this number of days. Defaults to 30. Returns: True if", "days_to_expiry < 0: no_keys_close_to_expiry = False action_message = \"KEY IS EXPIRED!\" elif days_to_expiry", "logging import sys import click import gpg from ..signing import ( DAYS_WARNING_FOR_KEY_EXPIRATION, add_trusted_keys_to_gpg_home_dir,", "click import gpg from ..signing import ( DAYS_WARNING_FOR_KEY_EXPIRATION, add_trusted_keys_to_gpg_home_dir, get_days_until_expiry, ) from ..utils", ") return no_keys_close_to_expiry @click.command() @click.argument(\"days_before_warning\", required=False) def main(days_before_warning) -> None: \"\"\"Log info about", "to expire or already expired, False otherwise \"\"\" with get_temporary_directory() as gpg_homedir: add_trusted_keys_to_gpg_home_dir(gpg_homedir)", "def main(days_before_warning) -> None: \"\"\"Log info about when GPG keys will expire\"\"\" no_keys_close_to_expiry", "None: \"\"\"Log info about when GPG keys will expire\"\"\" no_keys_close_to_expiry = check_gpg_key_expiry(days_before_warning) if", "logger.info( \"Key (FPR: %s) expires in %s days. %s\", fpr, days_to_expiry, action_message )", "# Copyright 2020-present Kensho Technologies, LLC. import logging import sys import click import", "warn if a key expires within this number of days. 
Defaults to 30.", "for now, but stay tuned\" logger.info( \"Key (FPR: %s) expires in %s days.", "ctx: fpr_to_expiry = get_days_until_expiry(ctx) no_keys_close_to_expiry = True for fpr, days_to_expiry in fpr_to_expiry.items(): if", "with get_temporary_directory() as gpg_homedir: add_trusted_keys_to_gpg_home_dir(gpg_homedir) with gpg.Context(home_dir=gpg_homedir) as ctx: fpr_to_expiry = get_days_until_expiry(ctx) no_keys_close_to_expiry", "days_to_expiry, action_message ) return no_keys_close_to_expiry @click.command() @click.argument(\"days_before_warning\", required=False) def main(days_before_warning) -> None: \"\"\"Log", "no_keys_close_to_expiry @click.command() @click.argument(\"days_before_warning\", required=False) def main(days_before_warning) -> None: \"\"\"Log info about when GPG", "@click.argument(\"days_before_warning\", required=False) def main(days_before_warning) -> None: \"\"\"Log info about when GPG keys will", "Args: days_warning_for_key_expiration: warn if a key expires within this number of days. Defaults", "= logging.getLogger(__name__) logger.setLevel(logging.INFO) def check_gpg_key_expiry( days_warning_for_key_expiration: int = DAYS_WARNING_FOR_KEY_EXPIRATION, ) -> bool: \"\"\"Check", "logger.setLevel(logging.INFO) def check_gpg_key_expiry( days_warning_for_key_expiration: int = DAYS_WARNING_FOR_KEY_EXPIRATION, ) -> bool: \"\"\"Check key expirations", "within this number of days. Defaults to 30. 
Returns: True if no keys", "about when GPG keys will expire\"\"\" no_keys_close_to_expiry = check_gpg_key_expiry(days_before_warning) if no_keys_close_to_expiry: sys.exit(0) sys.exit(1)", "EXPIRED!\" elif days_to_expiry < days_warning_for_key_expiration: no_keys_close_to_expiry = False action_message = \"UPDATE KEY ASAP!!!!\"", "no keys are soon to expire or already expired, False otherwise \"\"\" with", "expired, False otherwise \"\"\" with get_temporary_directory() as gpg_homedir: add_trusted_keys_to_gpg_home_dir(gpg_homedir) with gpg.Context(home_dir=gpg_homedir) as ctx:", "with gpg.Context(home_dir=gpg_homedir) as ctx: fpr_to_expiry = get_days_until_expiry(ctx) no_keys_close_to_expiry = True for fpr, days_to_expiry", "\"Key (FPR: %s) expires in %s days. %s\", fpr, days_to_expiry, action_message ) return", "fpr_to_expiry = get_days_until_expiry(ctx) no_keys_close_to_expiry = True for fpr, days_to_expiry in fpr_to_expiry.items(): if days_to_expiry", "import logging import sys import click import gpg from ..signing import ( DAYS_WARNING_FOR_KEY_EXPIRATION,", "LLC. import logging import sys import click import gpg from ..signing import (", "soon to expire or already expired, False otherwise \"\"\" with get_temporary_directory() as gpg_homedir:", "30. 
Returns: True if no keys are soon to expire or already expired,", "-> bool: \"\"\"Check key expirations Args: days_warning_for_key_expiration: warn if a key expires within", "\"\"\" with get_temporary_directory() as gpg_homedir: add_trusted_keys_to_gpg_home_dir(gpg_homedir) with gpg.Context(home_dir=gpg_homedir) as ctx: fpr_to_expiry = get_days_until_expiry(ctx)", "fpr_to_expiry.items(): if days_to_expiry < 0: no_keys_close_to_expiry = False action_message = \"KEY IS EXPIRED!\"", "already expired, False otherwise \"\"\" with get_temporary_directory() as gpg_homedir: add_trusted_keys_to_gpg_home_dir(gpg_homedir) with gpg.Context(home_dir=gpg_homedir) as", "from ..signing import ( DAYS_WARNING_FOR_KEY_EXPIRATION, add_trusted_keys_to_gpg_home_dir, get_days_until_expiry, ) from ..utils import get_temporary_directory logger", "< days_warning_for_key_expiration: no_keys_close_to_expiry = False action_message = \"UPDATE KEY ASAP!!!!\" else: action_message =", "days_to_expiry < days_warning_for_key_expiration: no_keys_close_to_expiry = False action_message = \"UPDATE KEY ASAP!!!!\" else: action_message", "if days_to_expiry < 0: no_keys_close_to_expiry = False action_message = \"KEY IS EXPIRED!\" elif", "key expirations Args: days_warning_for_key_expiration: warn if a key expires within this number of", "days_warning_for_key_expiration: int = DAYS_WARNING_FOR_KEY_EXPIRATION, ) -> bool: \"\"\"Check key expirations Args: days_warning_for_key_expiration: warn", "Kensho Technologies, LLC. import logging import sys import click import gpg from ..signing", "days. Defaults to 30. Returns: True if no keys are soon to expire", "add_trusted_keys_to_gpg_home_dir, get_days_until_expiry, ) from ..utils import get_temporary_directory logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def check_gpg_key_expiry(", "expires in %s days. 
%s\", fpr, days_to_expiry, action_message ) return no_keys_close_to_expiry @click.command() @click.argument(\"days_before_warning\",", "as gpg_homedir: add_trusted_keys_to_gpg_home_dir(gpg_homedir) with gpg.Context(home_dir=gpg_homedir) as ctx: fpr_to_expiry = get_days_until_expiry(ctx) no_keys_close_to_expiry = True", "in fpr_to_expiry.items(): if days_to_expiry < 0: no_keys_close_to_expiry = False action_message = \"KEY IS", "KEY ASAP!!!!\" else: action_message = \"OK for now, but stay tuned\" logger.info( \"Key", "%s days. %s\", fpr, days_to_expiry, action_message ) return no_keys_close_to_expiry @click.command() @click.argument(\"days_before_warning\", required=False) def", "but stay tuned\" logger.info( \"Key (FPR: %s) expires in %s days. %s\", fpr,", "def check_gpg_key_expiry( days_warning_for_key_expiration: int = DAYS_WARNING_FOR_KEY_EXPIRATION, ) -> bool: \"\"\"Check key expirations Args:", "no_keys_close_to_expiry = False action_message = \"KEY IS EXPIRED!\" elif days_to_expiry < days_warning_for_key_expiration: no_keys_close_to_expiry", "to 30. Returns: True if no keys are soon to expire or already", "action_message = \"KEY IS EXPIRED!\" elif days_to_expiry < days_warning_for_key_expiration: no_keys_close_to_expiry = False action_message", "Technologies, LLC. 
import logging import sys import click import gpg from ..signing import", "gpg.Context(home_dir=gpg_homedir) as ctx: fpr_to_expiry = get_days_until_expiry(ctx) no_keys_close_to_expiry = True for fpr, days_to_expiry in", "..signing import ( DAYS_WARNING_FOR_KEY_EXPIRATION, add_trusted_keys_to_gpg_home_dir, get_days_until_expiry, ) from ..utils import get_temporary_directory logger =", "\"OK for now, but stay tuned\" logger.info( \"Key (FPR: %s) expires in %s", "as ctx: fpr_to_expiry = get_days_until_expiry(ctx) no_keys_close_to_expiry = True for fpr, days_to_expiry in fpr_to_expiry.items():", "%s\", fpr, days_to_expiry, action_message ) return no_keys_close_to_expiry @click.command() @click.argument(\"days_before_warning\", required=False) def main(days_before_warning) ->", "action_message ) return no_keys_close_to_expiry @click.command() @click.argument(\"days_before_warning\", required=False) def main(days_before_warning) -> None: \"\"\"Log info", "days_warning_for_key_expiration: no_keys_close_to_expiry = False action_message = \"UPDATE KEY ASAP!!!!\" else: action_message = \"OK", "ASAP!!!!\" else: action_message = \"OK for now, but stay tuned\" logger.info( \"Key (FPR:" ]
[ "f.extractall(path) remove(path_zip) def set_requires_grad(module, requires_grad): for p in module.parameters(): p.requires_grad = requires_grad return", "remove(path_zip) def set_requires_grad(module, requires_grad): for p in module.parameters(): p.requires_grad = requires_grad return module", "Path(path).mkdir(parents=True, exist_ok=True) path_zip = join(path, 'ffhq.zip') download(id='1EL0pQnON0SFOY8XXn8DX4T6cIcKf4CNu', output=path_zip) with ZipFile(path_zip, 'r') as f:", "download from zipfile import ZipFile def download_ffhq(path): path = join(path, 'ffhq') if not", "os.path import isdir, join from pathlib import Path from gdown import download from", "gdown import download from zipfile import ZipFile def download_ffhq(path): path = join(path, 'ffhq')", "if not isdir(path): Path(path).mkdir(parents=True, exist_ok=True) path_zip = join(path, 'ffhq.zip') download(id='1EL0pQnON0SFOY8XXn8DX4T6cIcKf4CNu', output=path_zip) with ZipFile(path_zip,", "join(path, 'ffhq') if not isdir(path): Path(path).mkdir(parents=True, exist_ok=True) path_zip = join(path, 'ffhq.zip') download(id='1EL0pQnON0SFOY8XXn8DX4T6cIcKf4CNu', output=path_zip)", "from gdown import download from zipfile import ZipFile def download_ffhq(path): path = join(path,", "from zipfile import ZipFile def download_ffhq(path): path = join(path, 'ffhq') if not isdir(path):", "zipfile import ZipFile def download_ffhq(path): path = join(path, 'ffhq') if not isdir(path): Path(path).mkdir(parents=True,", "from os.path import isdir, join from pathlib import Path from gdown import download", "f: f.extractall(path) remove(path_zip) def set_requires_grad(module, requires_grad): for p in module.parameters(): p.requires_grad = requires_grad", "isdir(path): Path(path).mkdir(parents=True, exist_ok=True) path_zip = join(path, 'ffhq.zip') download(id='1EL0pQnON0SFOY8XXn8DX4T6cIcKf4CNu', output=path_zip) with ZipFile(path_zip, 'r') as", "download(id='1EL0pQnON0SFOY8XXn8DX4T6cIcKf4CNu', output=path_zip) with 
ZipFile(path_zip, 'r') as f: f.extractall(path) remove(path_zip) def set_requires_grad(module, requires_grad): for", "import remove from os.path import isdir, join from pathlib import Path from gdown", "join(path, 'ffhq.zip') download(id='1EL0pQnON0SFOY8XXn8DX4T6cIcKf4CNu', output=path_zip) with ZipFile(path_zip, 'r') as f: f.extractall(path) remove(path_zip) def set_requires_grad(module,", "path_zip = join(path, 'ffhq.zip') download(id='1EL0pQnON0SFOY8XXn8DX4T6cIcKf4CNu', output=path_zip) with ZipFile(path_zip, 'r') as f: f.extractall(path) remove(path_zip)", "not isdir(path): Path(path).mkdir(parents=True, exist_ok=True) path_zip = join(path, 'ffhq.zip') download(id='1EL0pQnON0SFOY8XXn8DX4T6cIcKf4CNu', output=path_zip) with ZipFile(path_zip, 'r')", "'r') as f: f.extractall(path) remove(path_zip) def set_requires_grad(module, requires_grad): for p in module.parameters(): p.requires_grad", "from os import remove from os.path import isdir, join from pathlib import Path", "os import remove from os.path import isdir, join from pathlib import Path from", "as f: f.extractall(path) remove(path_zip) def set_requires_grad(module, requires_grad): for p in module.parameters(): p.requires_grad =", "path = join(path, 'ffhq') if not isdir(path): Path(path).mkdir(parents=True, exist_ok=True) path_zip = join(path, 'ffhq.zip')", "ZipFile def download_ffhq(path): path = join(path, 'ffhq') if not isdir(path): Path(path).mkdir(parents=True, exist_ok=True) path_zip", "with ZipFile(path_zip, 'r') as f: f.extractall(path) remove(path_zip) def set_requires_grad(module, requires_grad): for p in", "import download from zipfile import ZipFile def download_ffhq(path): path = join(path, 'ffhq') if", "Path from gdown import download from zipfile import ZipFile def download_ffhq(path): path =", "output=path_zip) with ZipFile(path_zip, 'r') as f: f.extractall(path) remove(path_zip) def set_requires_grad(module, requires_grad): for p", "join from pathlib import Path from gdown import download 
from zipfile import ZipFile", "'ffhq.zip') download(id='1EL0pQnON0SFOY8XXn8DX4T6cIcKf4CNu', output=path_zip) with ZipFile(path_zip, 'r') as f: f.extractall(path) remove(path_zip) def set_requires_grad(module, requires_grad):", "import isdir, join from pathlib import Path from gdown import download from zipfile", "download_ffhq(path): path = join(path, 'ffhq') if not isdir(path): Path(path).mkdir(parents=True, exist_ok=True) path_zip = join(path,", "exist_ok=True) path_zip = join(path, 'ffhq.zip') download(id='1EL0pQnON0SFOY8XXn8DX4T6cIcKf4CNu', output=path_zip) with ZipFile(path_zip, 'r') as f: f.extractall(path)", "isdir, join from pathlib import Path from gdown import download from zipfile import", "ZipFile(path_zip, 'r') as f: f.extractall(path) remove(path_zip) def set_requires_grad(module, requires_grad): for p in module.parameters():", "import Path from gdown import download from zipfile import ZipFile def download_ffhq(path): path", "def download_ffhq(path): path = join(path, 'ffhq') if not isdir(path): Path(path).mkdir(parents=True, exist_ok=True) path_zip =", "= join(path, 'ffhq') if not isdir(path): Path(path).mkdir(parents=True, exist_ok=True) path_zip = join(path, 'ffhq.zip') download(id='1EL0pQnON0SFOY8XXn8DX4T6cIcKf4CNu',", "import ZipFile def download_ffhq(path): path = join(path, 'ffhq') if not isdir(path): Path(path).mkdir(parents=True, exist_ok=True)", "pathlib import Path from gdown import download from zipfile import ZipFile def download_ffhq(path):", "= join(path, 'ffhq.zip') download(id='1EL0pQnON0SFOY8XXn8DX4T6cIcKf4CNu', output=path_zip) with ZipFile(path_zip, 'r') as f: f.extractall(path) remove(path_zip) def", "'ffhq') if not isdir(path): Path(path).mkdir(parents=True, exist_ok=True) path_zip = join(path, 'ffhq.zip') download(id='1EL0pQnON0SFOY8XXn8DX4T6cIcKf4CNu', output=path_zip) with", "remove from os.path import isdir, join from pathlib import Path from gdown import", "from pathlib import Path from gdown import download from zipfile 
import ZipFile def" ]
[ "utf-8 -*- from django import forms from haystack.forms import SearchForm class MySearchForm(SearchForm): \"\"\"", "\"\"\" Search records (exclude 'deleted' records by default)... For form information... see 'Creating", "from other processing. sqs = super().search() if not self.is_valid(): return self.no_query_found() # Check", "\"\"\" deleted = forms.BooleanField(required=False) def search(self): # First, store the SearchQuerySet received from", "not self.is_valid(): return self.no_query_found() # Check to see if a deleted was ticked.", "return self.no_query_found() # Check to see if a deleted was ticked. deleted =", "store the SearchQuerySet received from other processing. sqs = super().search() if not self.is_valid():", "# Check to see if a deleted was ticked. deleted = self.cleaned_data['deleted'] if", "records by default)... For form information... see 'Creating your own form': http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form \"\"\"", "information... see 'Creating your own form': http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form \"\"\" deleted = forms.BooleanField(required=False) def search(self):", "see if a deleted was ticked. deleted = self.cleaned_data['deleted'] if deleted: # return", "from django import forms from haystack.forms import SearchForm class MySearchForm(SearchForm): \"\"\" Search records", "ticked. deleted = self.cleaned_data['deleted'] if deleted: # return all records pass else: #", "SearchForm class MySearchForm(SearchForm): \"\"\" Search records (exclude 'deleted' records by default)... 
For form", "your own form': http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form \"\"\" deleted = forms.BooleanField(required=False) def search(self): # First, store", "deleted: # return all records pass else: # exclude deleted records sqs =", "http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form \"\"\" deleted = forms.BooleanField(required=False) def search(self): # First, store the SearchQuerySet received", "# First, store the SearchQuerySet received from other processing. sqs = super().search() if", "forms from haystack.forms import SearchForm class MySearchForm(SearchForm): \"\"\" Search records (exclude 'deleted' records", "form information... see 'Creating your own form': http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form \"\"\" deleted = forms.BooleanField(required=False) def", "the SearchQuerySet received from other processing. sqs = super().search() if not self.is_valid(): return", "to see if a deleted was ticked. deleted = self.cleaned_data['deleted'] if deleted: #", "MySearchForm(SearchForm): \"\"\" Search records (exclude 'deleted' records by default)... For form information... see", "Check to see if a deleted was ticked. deleted = self.cleaned_data['deleted'] if deleted:", "was ticked. deleted = self.cleaned_data['deleted'] if deleted: # return all records pass else:", "Search records (exclude 'deleted' records by default)... For form information... see 'Creating your", "form': http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form \"\"\" deleted = forms.BooleanField(required=False) def search(self): # First, store the SearchQuerySet", "'deleted' records by default)... For form information... see 'Creating your own form': http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form", "by default)... For form information... 
see 'Creating your own form': http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form \"\"\" deleted", "deleted = forms.BooleanField(required=False) def search(self): # First, store the SearchQuerySet received from other", "if deleted: # return all records pass else: # exclude deleted records sqs", "default)... For form information... see 'Creating your own form': http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form \"\"\" deleted =", "-*- from django import forms from haystack.forms import SearchForm class MySearchForm(SearchForm): \"\"\" Search", "encoding: utf-8 -*- from django import forms from haystack.forms import SearchForm class MySearchForm(SearchForm):", "django import forms from haystack.forms import SearchForm class MySearchForm(SearchForm): \"\"\" Search records (exclude", "super().search() if not self.is_valid(): return self.no_query_found() # Check to see if a deleted", "# -*- encoding: utf-8 -*- from django import forms from haystack.forms import SearchForm", "-*- encoding: utf-8 -*- from django import forms from haystack.forms import SearchForm class", "self.no_query_found() # Check to see if a deleted was ticked. deleted = self.cleaned_data['deleted']", "deleted = self.cleaned_data['deleted'] if deleted: # return all records pass else: # exclude", "all records pass else: # exclude deleted records sqs = sqs.exclude(deleted=1) return sqs", "'Creating your own form': http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form \"\"\" deleted = forms.BooleanField(required=False) def search(self): # First,", "= self.cleaned_data['deleted'] if deleted: # return all records pass else: # exclude deleted", "a deleted was ticked. deleted = self.cleaned_data['deleted'] if deleted: # return all records", "For form information... 
see 'Creating your own form': http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form \"\"\" deleted = forms.BooleanField(required=False)", "if not self.is_valid(): return self.no_query_found() # Check to see if a deleted was", "# return all records pass else: # exclude deleted records sqs = sqs.exclude(deleted=1)", "First, store the SearchQuerySet received from other processing. sqs = super().search() if not", "self.is_valid(): return self.no_query_found() # Check to see if a deleted was ticked. deleted", "sqs = super().search() if not self.is_valid(): return self.no_query_found() # Check to see if", "own form': http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form \"\"\" deleted = forms.BooleanField(required=False) def search(self): # First, store the", "deleted was ticked. deleted = self.cleaned_data['deleted'] if deleted: # return all records pass", "import forms from haystack.forms import SearchForm class MySearchForm(SearchForm): \"\"\" Search records (exclude 'deleted'", "import SearchForm class MySearchForm(SearchForm): \"\"\" Search records (exclude 'deleted' records by default)... For", "forms.BooleanField(required=False) def search(self): # First, store the SearchQuerySet received from other processing. sqs", "records (exclude 'deleted' records by default)... For form information... see 'Creating your own", "<gh_stars>0 # -*- encoding: utf-8 -*- from django import forms from haystack.forms import", "other processing. sqs = super().search() if not self.is_valid(): return self.no_query_found() # Check to", "search(self): # First, store the SearchQuerySet received from other processing. sqs = super().search()", "(exclude 'deleted' records by default)... For form information... see 'Creating your own form':", "def search(self): # First, store the SearchQuerySet received from other processing. sqs =", "processing. 
sqs = super().search() if not self.is_valid(): return self.no_query_found() # Check to see", "SearchQuerySet received from other processing. sqs = super().search() if not self.is_valid(): return self.no_query_found()", "= super().search() if not self.is_valid(): return self.no_query_found() # Check to see if a", "return all records pass else: # exclude deleted records sqs = sqs.exclude(deleted=1) return", "class MySearchForm(SearchForm): \"\"\" Search records (exclude 'deleted' records by default)... For form information...", "self.cleaned_data['deleted'] if deleted: # return all records pass else: # exclude deleted records", "haystack.forms import SearchForm class MySearchForm(SearchForm): \"\"\" Search records (exclude 'deleted' records by default)...", "received from other processing. sqs = super().search() if not self.is_valid(): return self.no_query_found() #", "from haystack.forms import SearchForm class MySearchForm(SearchForm): \"\"\" Search records (exclude 'deleted' records by", "see 'Creating your own form': http://django-haystack.readthedocs.org/en/latest/views_and_forms.html#creating-your-own-form \"\"\" deleted = forms.BooleanField(required=False) def search(self): #", "if a deleted was ticked. deleted = self.cleaned_data['deleted'] if deleted: # return all", "= forms.BooleanField(required=False) def search(self): # First, store the SearchQuerySet received from other processing." ]
[ "# ================= # $ python setup.py register -r pypi # $ python setup.py", "upload -r pypi version = '0.0' distutils.core.setup( name='finalexam', version=version, author='<NAME>', url='https://github.com/kalekundert/finalexam', download_url='https://github.com/kalekundert/finalexam/tarball/'+version, license='LICENSE.txt',", "python setup.py sdist upload -r pypi version = '0.0' distutils.core.setup( name='finalexam', version=version, author='<NAME>',", "PyPI # ================= # $ python setup.py register -r pypi # $ python", "$ python setup.py register -r pypi # $ python setup.py sdist upload -r", "sdist upload -r pypi version = '0.0' distutils.core.setup( name='finalexam', version=version, author='<NAME>', url='https://github.com/kalekundert/finalexam', download_url='https://github.com/kalekundert/finalexam/tarball/'+version,", "author='<NAME>', url='https://github.com/kalekundert/finalexam', download_url='https://github.com/kalekundert/finalexam/tarball/'+version, license='LICENSE.txt', description=\"A simple unit testing framework.\", long_description=open('README.rst').read(), keywords=['unit', 'testing', 'pythonic',", "pypi version = '0.0' distutils.core.setup( name='finalexam', version=version, author='<NAME>', url='https://github.com/kalekundert/finalexam', download_url='https://github.com/kalekundert/finalexam/tarball/'+version, license='LICENSE.txt', description=\"A simple", "Uploading to PyPI # ================= # $ python setup.py register -r pypi #", "description=\"A simple unit testing framework.\", long_description=open('README.rst').read(), keywords=['unit', 'testing', 'pythonic', 'library'], py_modules=['finalexam'], requires=['nonstdlib'], )", "-r pypi # $ python setup.py sdist upload -r pypi version = '0.0'", "version=version, author='<NAME>', url='https://github.com/kalekundert/finalexam', download_url='https://github.com/kalekundert/finalexam/tarball/'+version, license='LICENSE.txt', description=\"A simple unit testing framework.\", 
long_description=open('README.rst').read(), keywords=['unit', 'testing',", "$ python setup.py sdist upload -r pypi version = '0.0' distutils.core.setup( name='finalexam', version=version,", "# $ python setup.py register -r pypi # $ python setup.py sdist upload", "= '0.0' distutils.core.setup( name='finalexam', version=version, author='<NAME>', url='https://github.com/kalekundert/finalexam', download_url='https://github.com/kalekundert/finalexam/tarball/'+version, license='LICENSE.txt', description=\"A simple unit testing", "register -r pypi # $ python setup.py sdist upload -r pypi version =", "-r pypi version = '0.0' distutils.core.setup( name='finalexam', version=version, author='<NAME>', url='https://github.com/kalekundert/finalexam', download_url='https://github.com/kalekundert/finalexam/tarball/'+version, license='LICENSE.txt', description=\"A", "python setup.py register -r pypi # $ python setup.py sdist upload -r pypi", "<reponame>kalekundert/TestSuite import distutils.core # Uploading to PyPI # ================= # $ python setup.py", "setup.py register -r pypi # $ python setup.py sdist upload -r pypi version", "license='LICENSE.txt', description=\"A simple unit testing framework.\", long_description=open('README.rst').read(), keywords=['unit', 'testing', 'pythonic', 'library'], py_modules=['finalexam'], requires=['nonstdlib'],", "setup.py sdist upload -r pypi version = '0.0' distutils.core.setup( name='finalexam', version=version, author='<NAME>', url='https://github.com/kalekundert/finalexam',", "download_url='https://github.com/kalekundert/finalexam/tarball/'+version, license='LICENSE.txt', description=\"A simple unit testing framework.\", long_description=open('README.rst').read(), keywords=['unit', 'testing', 'pythonic', 'library'], py_modules=['finalexam'],", "'0.0' distutils.core.setup( name='finalexam', version=version, author='<NAME>', url='https://github.com/kalekundert/finalexam', 
download_url='https://github.com/kalekundert/finalexam/tarball/'+version, license='LICENSE.txt', description=\"A simple unit testing framework.\",", "import distutils.core # Uploading to PyPI # ================= # $ python setup.py register", "distutils.core.setup( name='finalexam', version=version, author='<NAME>', url='https://github.com/kalekundert/finalexam', download_url='https://github.com/kalekundert/finalexam/tarball/'+version, license='LICENSE.txt', description=\"A simple unit testing framework.\", long_description=open('README.rst').read(),", "to PyPI # ================= # $ python setup.py register -r pypi # $", "name='finalexam', version=version, author='<NAME>', url='https://github.com/kalekundert/finalexam', download_url='https://github.com/kalekundert/finalexam/tarball/'+version, license='LICENSE.txt', description=\"A simple unit testing framework.\", long_description=open('README.rst').read(), keywords=['unit',", "distutils.core # Uploading to PyPI # ================= # $ python setup.py register -r", "version = '0.0' distutils.core.setup( name='finalexam', version=version, author='<NAME>', url='https://github.com/kalekundert/finalexam', download_url='https://github.com/kalekundert/finalexam/tarball/'+version, license='LICENSE.txt', description=\"A simple unit", "pypi # $ python setup.py sdist upload -r pypi version = '0.0' distutils.core.setup(", "# Uploading to PyPI # ================= # $ python setup.py register -r pypi", "================= # $ python setup.py register -r pypi # $ python setup.py sdist", "url='https://github.com/kalekundert/finalexam', download_url='https://github.com/kalekundert/finalexam/tarball/'+version, license='LICENSE.txt', description=\"A simple unit testing framework.\", long_description=open('README.rst').read(), keywords=['unit', 'testing', 'pythonic', 'library'],", "# $ python setup.py sdist upload -r pypi version = '0.0' distutils.core.setup( name='finalexam'," ]
[ "as tf import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.colors import", "list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '_error.png')) plt.show() plt.close() fig = plt.figure(figsize=(6, 5)) gs =", "lines. \"\"\" # get the value range vmin = -2e-1 vmax = 2e-1", "-5e-1 if (title == 'u'): vmax = 1.1e+0 vmin = -2e-1 if (title", "a PINN model model = PINN(network, rho=rho, nu=nu).build() # create training input xy_eqn", "= 2e-1 if (title == 'psi'): vmax = 1.2e-1 vmin = -1e-1 if", "cm from matplotlib.colors import Normalize from matplotlib.gridspec import GridSpec import os import pickle", "\"\"\" Compute flow velocities (u, v) for the network with output (psi, p).", "= network(xy) psi_p_j = g.batch_jacobian(psi_p, xy) u = psi_p_j[..., 0, 1] v =", "uv_bnd = np.zeros((num_train_samples, 2)) uv_bnd[..., 0] = u0 * np.floor(xy_bnd[..., 1]) y_train =", "# plot test results fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2) contour(gs[0,", "with output (psi, p). Args: xy: network input variables as ndarray. Returns: (u,", "number of contour lines. \"\"\" # get the value range vmin = -2e-1", "plot position. x: x-array. y: y-array. z: z-array. title: title string. levels: number", "m.set_clim(vmin, vmax) cbar = plt.colorbar(m, pad=0.03, aspect=25, format='%.0e') cbar.mappable.set_clim(vmin, vmax) if __name__ ==", "= np.zeros((num_train_samples, 2)) uv_bnd = np.zeros((num_train_samples, 2)) uv_bnd[..., 0] = u0 * np.floor(xy_bnd[...,", "z-array. title: title string. levels: number of contour lines. 
\"\"\" # get the", "plt.close() fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2) contour(gs[0, 0], x, y,", "psi, p, u, v] with open(args.gt_path, 'wb') as f: pickle.dump(data, f) plt.tight_layout() plt.savefig(os.path.join('figures',", "+ '_error.png')) plt.show() plt.close() fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2) contour(gs[0,", "psi, 'psi') contour(gs[0, 1], x, y, p, 'p') contour(gs[1, 0], x, y, u,", "lib.optimizer import Optimizer def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--maxiter', type=int, default=2000) parser.add_argument('-ntr',", "p = [ psi_p[..., i].reshape(x.shape) for i in range(psi_p.shape[-1]) ] # compute (u,", "vmin = -2e-1 vmax = 2e-1 if (title == 'psi'): vmax = 1.2e-1", "= network.predict(xy, batch_size=len(xy)) psi, p = [ psi_p[..., i].reshape(x.shape) for i in range(psi_p.shape[-1])", "2)) uv_bnd[..., 0] = u0 * np.floor(xy_bnd[..., 1]) y_train = [zeros, zeros, uv_bnd]", "0] return u.numpy(), v.numpy() def contour(grid, x, y, z, title, levels=50): \"\"\" Contour", "rho=rho, nu=nu).build() # create training input xy_eqn = np.random.rand(num_train_samples, 2) xy_ub = np.random.rand(num_train_samples//2,", "y, v, 'v') plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png')) plt.show() plt.close() else:", "u, v] with open(args.gt_path, 'wb') as f: pickle.dump(data, f) plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() +", "Args: xy: network input variables as ndarray. Returns: (u, v) as ndarray. 
\"\"\"", "matplotlib.cm as cm from matplotlib.colors import Normalize from matplotlib.gridspec import GridSpec import os", "os import pickle import argparse from lib.pinn import PINN from lib.network import Network", "# create training input xy_eqn = np.random.rand(num_train_samples, 2) xy_ub = np.random.rand(num_train_samples//2, 2) #", "1 xy_bnd = np.random.permutation(np.concatenate([xy_ub, xy_lr])) x_train = [xy_eqn, xy_bnd] # create training output", "cmap='rainbow', levels=levels, vmin=vmin, vmax=vmax) plt.title(title) m = plt.cm.ScalarMappable(cmap='rainbow', norm=Normalize(vmin=vmin, vmax=vmax)) m.set_array(z) m.set_clim(vmin, vmax)", "parser.add_argument('-gi', '--gradient-interval', type=int, default=100) parser.add_argument('--gt-path', type=str, default='data/pinn.pkl') return parser.parse_known_args()[0] def uv(network, xy): \"\"\"", "# y-position is 0 or 1 xy_lr = np.random.rand(num_train_samples//2, 2) # left-right boundaries", "2) # left-right boundaries xy_lr[..., 0] = np.round(xy_lr[..., 0]) # x-position is 0", "results fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2) contour(gs[0, 0], x, y,", "xy) u = u.reshape(x.shape) v = v.reshape(x.shape) if os.path.isfile(args.gt_path): with open(args.gt_path, 'rb') as", "= np.random.permutation(np.concatenate([xy_ub, xy_lr])) x_train = [xy_eqn, xy_bnd] # create training output zeros =", "import time import lib.tf_silent import numpy as np import tensorflow as tf import", "1.2e-1 vmin = -1e-1 if (title == 'p'): vmax = 6.1e-1 vmin =", "= -1e-1 if (title == 'p'): vmax = 6.1e-1 vmin = -5e-1 if", "8.1e-2 vmin = 0.0 # plot a contour plt.subplot(grid) print(title, vmin, vmax) plt.contour(x,", "x_train=x_train, y_train=y_train, dict_params=args.__dict__) optimizer.fit() # create meshgrid coordinates (x, y) for test plots", "x, y = np.meshgrid(x, y) xy = np.stack([x.flatten(), y.flatten()], axis=-1) # predict (psi,", "x, y, u, 'u') contour(gs[1, 1], x, y, v, 'v') data = [x,", "'dv'): vmax = 8.1e-2 vmin = 0.0 # 
plot a contour plt.subplot(grid) print(title,", "from lib.optimizer import Optimizer def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--maxiter', type=int, default=2000)", "vmax) if __name__ == '__main__': \"\"\" Test the physics informed neural network (PINN)", "== 'dpsi'): vmax = 1.1e-2 vmin = 0.0 if (title == 'dp'): vmax", "the physics informed neural network (PINN) model for the cavity flow governed by", "= -2e-1 vmax = 2e-1 if (title == 'psi'): vmax = 1.2e-1 vmin", "# left-right boundaries xy_lr[..., 0] = np.round(xy_lr[..., 0]) # x-position is 0 or", "np.random.rand(num_train_samples//2, 2) # top-bottom boundaries xy_ub[..., 1] = np.round(xy_ub[..., 1]) # y-position is", "= 1.2e-1 vmin = -1e-1 if (title == 'p'): vmax = 6.1e-1 vmin", "# create training output zeros = np.zeros((num_train_samples, 2)) uv_bnd = np.zeros((num_train_samples, 2)) uv_bnd[...,", "L-BFGS-B algorithm optimizer = Optimizer(model=model, x_train=x_train, y_train=y_train, dict_params=args.__dict__) optimizer.fit() # create meshgrid coordinates", "# create meshgrid coordinates (x, y) for test plots x = np.linspace(0, 1,", "type=str, default='pinn') parser.add_argument('-l', '--loss', type=str, default='l2') parser.add_argument('-gi', '--gradient-interval', type=int, default=100) parser.add_argument('--gt-path', type=str, default='data/pinn.pkl')", "nu = 0.01 # build a core network model network = Network().build() network.summary()", "def contour(grid, x, y, z, title, levels=50): \"\"\" Contour plot. 
Args: grid: plot", "number of test samples num_test_samples = args.num_test_samples # inlet flow velocity u0 =", "'du') contour(gs[1, 1], x, y, np.abs(v - v_gt), 'dv') plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() +", "'u') contour(gs[1, 1], x, y, v, 'v') plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) +", "contour(gs[0, 1], x, y, p, 'p') contour(gs[1, 0], x, y, u, 'u') contour(gs[1,", "open(args.gt_path, 'rb') as f: data = pickle.load(f) x_gt, y_gt, psi_gt, p_gt, u_gt, v_gt", "2) xy_ub = np.random.rand(num_train_samples//2, 2) # top-bottom boundaries xy_ub[..., 1] = np.round(xy_ub[..., 1])", "'--num-train-samples', type=int, default=10000) parser.add_argument('-nte', '--num-test-samples', type=int, default=100) parser.add_argument('-n', '--network', type=str, default='pinn') parser.add_argument('-l', '--loss',", "0.0 if (title == 'du'): vmax = 1.1e-1 vmin = 0.0 if (title", "if (title == 'psi'): vmax = 1.2e-1 vmin = -1e-1 if (title ==", "= pickle.load(f) x_gt, y_gt, psi_gt, p_gt, u_gt, v_gt = data fig = plt.figure(figsize=(6,", "network model network = Network().build() network.summary() # build a PINN model model =", "plt.cm.ScalarMappable(cmap='rainbow', norm=Normalize(vmin=vmin, vmax=vmax)) m.set_array(z) m.set_clim(vmin, vmax) cbar = plt.colorbar(m, pad=0.03, aspect=25, format='%.0e') cbar.mappable.set_clim(vmin,", "test samples num_test_samples = args.num_test_samples # inlet flow velocity u0 = 1 #", "= plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2) contour(gs[0, 0], x, y, psi, 'psi')", "== 'dv'): vmax = 8.1e-2 vmin = 0.0 # plot a contour plt.subplot(grid)", "__name__ == '__main__': \"\"\" Test the physics informed neural network (PINN) model for", "variables as ndarray. Returns: (u, v) as ndarray. 
\"\"\" xy = tf.constant(xy) with", "def uv(network, xy): \"\"\" Compute flow velocities (u, v) for the network with", "cavity flow governed by the steady Navier-Stokes equation. \"\"\" args = parse_args() #", "dict_params=args.__dict__) optimizer.fit() # create meshgrid coordinates (x, y) for test plots x =", "y, z, title, levels=50): \"\"\" Contour plot. Args: grid: plot position. x: x-array.", "0 or 1 xy_bnd = np.random.permutation(np.concatenate([xy_ub, xy_lr])) x_train = [xy_eqn, xy_bnd] # create", "import PINN from lib.network import Network from lib.optimizer import Optimizer def parse_args(): parser", "y) xy = np.stack([x.flatten(), y.flatten()], axis=-1) # predict (psi, p) psi_p = network.predict(xy,", "PINN(network, rho=rho, nu=nu).build() # create training input xy_eqn = np.random.rand(num_train_samples, 2) xy_ub =", "from lib.network import Network from lib.optimizer import Optimizer def parse_args(): parser = argparse.ArgumentParser()", "lib.pinn import PINN from lib.network import Network from lib.optimizer import Optimizer def parse_args():", "the value range vmin = -2e-1 vmax = 2e-1 if (title == 'psi'):", "= 1.1e-2 vmin = 0.0 if (title == 'dp'): vmax = 4.1e-1 vmin", "gs = GridSpec(2, 2) contour(gs[0, 0], x, y, np.abs(psi - psi_gt), 'dpsi') contour(gs[0,", "parser.add_argument('--gt-path', type=str, default='data/pinn.pkl') return parser.parse_known_args()[0] def uv(network, xy): \"\"\" Compute flow velocities (u,", "parser = argparse.ArgumentParser() parser.add_argument('-i', '--maxiter', type=int, default=2000) parser.add_argument('-ntr', '--num-train-samples', type=int, default=10000) parser.add_argument('-nte', '--num-test-samples',", "contour(gs[0, 0], x, y, np.abs(psi - psi_gt), 'dpsi') contour(gs[0, 1], x, y, np.abs(p", "open(args.gt_path, 'wb') as f: pickle.dump(data, f) plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png'))", "0], x, y, u, 'u') contour(gs[1, 1], x, y, 
v, 'v') plt.tight_layout() plt.savefig(os.path.join('figures',", "1.1e-1 vmin = 0.0 if (title == 'dv'): vmax = 8.1e-2 vmin =", "v) as ndarray. \"\"\" xy = tf.constant(xy) with tf.GradientTape() as g: g.watch(xy) psi_p", "= 0.0 if (title == 'dp'): vmax = 4.1e-1 vmin = 0.0 if", "aspect=25, format='%.0e') cbar.mappable.set_clim(vmin, vmax) if __name__ == '__main__': \"\"\" Test the physics informed", "contour(gs[1, 1], x, y, np.abs(v - v_gt), 'dv') plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time())", "levels=levels, vmin=vmin, vmax=vmax) plt.title(title) m = plt.cm.ScalarMappable(cmap='rainbow', norm=Normalize(vmin=vmin, vmax=vmax)) m.set_array(z) m.set_clim(vmin, vmax) cbar", "0]) # x-position is 0 or 1 xy_bnd = np.random.permutation(np.concatenate([xy_ub, xy_lr])) x_train =", "as np import tensorflow as tf import matplotlib.pyplot as plt import matplotlib.cm as", "y, np.abs(p - p_gt), 'dp') contour(gs[1, 0], x, y, np.abs(u - u_gt), 'du')", "y, u, 'u') contour(gs[1, 1], x, y, v, 'v') plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() +", "(title == 'du'): vmax = 1.1e-1 vmin = 0.0 if (title == 'dv'):", "= 6.1e-1 vmin = -5e-1 if (title == 'u'): vmax = 1.1e+0 vmin", "import argparse from lib.pinn import PINN from lib.network import Network from lib.optimizer import", "uv_bnd] # train the model using L-BFGS-B algorithm optimizer = Optimizer(model=model, x_train=x_train, y_train=y_train,", "uv_bnd[..., 0] = u0 * np.floor(xy_bnd[..., 1]) y_train = [zeros, zeros, uv_bnd] #", "'psi') contour(gs[0, 1], x, y, p, 'p') contour(gs[1, 0], x, y, u, 'u')", "u, v = uv(network, xy) u = u.reshape(x.shape) v = v.reshape(x.shape) if os.path.isfile(args.gt_path):", "velocities (u, v) for the network with output (psi, p). 
Args: xy: network", "= 0.0 if (title == 'dv'): vmax = 8.1e-2 vmin = 0.0 #", "cbar.mappable.set_clim(vmin, vmax) if __name__ == '__main__': \"\"\" Test the physics informed neural network", "y_train = [zeros, zeros, uv_bnd] # train the model using L-BFGS-B algorithm optimizer", "y.flatten()], axis=-1) # predict (psi, p) psi_p = network.predict(xy, batch_size=len(xy)) psi, p =", "xy_bnd = np.random.permutation(np.concatenate([xy_ub, xy_lr])) x_train = [xy_eqn, xy_bnd] # create training output zeros", "-2e-1 vmax = 2e-1 if (title == 'psi'): vmax = 1.2e-1 vmin =", "0 or 1 xy_lr = np.random.rand(num_train_samples//2, 2) # left-right boundaries xy_lr[..., 0] =", "p_gt), 'dp') contour(gs[1, 0], x, y, np.abs(u - u_gt), 'du') contour(gs[1, 1], x,", "import pdb import time import lib.tf_silent import numpy as np import tensorflow as", "= GridSpec(2, 2) contour(gs[0, 0], x, y, psi, 'psi') contour(gs[0, 1], x, y,", "v_gt), 'dv') plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '_error.png')) plt.show() plt.close() fig =", "= GridSpec(2, 2) contour(gs[0, 0], x, y, np.abs(psi - psi_gt), 'dpsi') contour(gs[0, 1],", "PINN from lib.network import Network from lib.optimizer import Optimizer def parse_args(): parser =", "default='pinn') parser.add_argument('-l', '--loss', type=str, default='l2') parser.add_argument('-gi', '--gradient-interval', type=int, default=100) parser.add_argument('--gt-path', type=str, default='data/pinn.pkl') return", "g: g.watch(xy) psi_p = network(xy) psi_p_j = g.batch_jacobian(psi_p, xy) u = psi_p_j[..., 0,", "import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.colors import Normalize from", "import GridSpec import os import pickle import argparse from lib.pinn import PINN from", "return u.numpy(), v.numpy() def contour(grid, x, y, z, title, levels=50): \"\"\" Contour plot.", "1], x, y, v, 'v') plt.tight_layout() plt.savefig(os.path.join('figures', 
list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png')) plt.show()", "training input xy_eqn = np.random.rand(num_train_samples, 2) xy_ub = np.random.rand(num_train_samples//2, 2) # top-bottom boundaries", "x, y, np.abs(u - u_gt), 'du') contour(gs[1, 1], x, y, np.abs(v - v_gt),", "# predict (psi, p) psi_p = network.predict(xy, batch_size=len(xy)) psi, p = [ psi_p[...,", "parser.add_argument('-n', '--network', type=str, default='pinn') parser.add_argument('-l', '--loss', type=str, default='l2') parser.add_argument('-gi', '--gradient-interval', type=int, default=100) parser.add_argument('--gt-path',", "'--maxiter', type=int, default=2000) parser.add_argument('-ntr', '--num-train-samples', type=int, default=10000) parser.add_argument('-nte', '--num-test-samples', type=int, default=100) parser.add_argument('-n', '--network',", "= PINN(network, rho=rho, nu=nu).build() # create training input xy_eqn = np.random.rand(num_train_samples, 2) xy_ub", "= 4.1e-1 vmin = 0.0 if (title == 'du'): vmax = 1.1e-1 vmin", "vmax = 1.1e-1 vmin = 0.0 if (title == 'dv'): vmax = 8.1e-2", "num_train_samples = args.num_train_samples # number of test samples num_test_samples = args.num_test_samples # inlet", "contour lines. \"\"\" # get the value range vmin = -2e-1 vmax =", "'wb') as f: pickle.dump(data, f) plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png')) plt.show()", "default=100) parser.add_argument('-n', '--network', type=str, default='pinn') parser.add_argument('-l', '--loss', type=str, default='l2') parser.add_argument('-gi', '--gradient-interval', type=int, default=100)", "import numpy as np import tensorflow as tf import matplotlib.pyplot as plt import", "steady Navier-Stokes equation. 
\"\"\" args = parse_args() # number of training samples num_train_samples", "\"\"\" # get the value range vmin = -2e-1 vmax = 2e-1 if", "'dp') contour(gs[1, 0], x, y, np.abs(u - u_gt), 'du') contour(gs[1, 1], x, y,", "psi_gt, p_gt, u_gt, v_gt = data fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2,", "1, num_test_samples) y = np.linspace(0, 1, num_test_samples) x, y = np.meshgrid(x, y) xy", "= 0.0 if (title == 'du'): vmax = 1.1e-1 vmin = 0.0 if", "p). Args: xy: network input variables as ndarray. Returns: (u, v) as ndarray.", "np.abs(psi - psi_gt), 'dpsi') contour(gs[0, 1], x, y, np.abs(p - p_gt), 'dp') contour(gs[1,", "import tensorflow as tf import matplotlib.pyplot as plt import matplotlib.cm as cm from", "# inlet flow velocity u0 = 1 # density rho = 1 #", "os.path.isfile(args.gt_path): with open(args.gt_path, 'rb') as f: data = pickle.load(f) x_gt, y_gt, psi_gt, p_gt,", "z, colors='k', linewidths=0.2, levels=levels, vmin=vmin, vmax=vmax) plt.contourf(x, y, z, cmap='rainbow', levels=levels, vmin=vmin, vmax=vmax)", "matplotlib.colors import Normalize from matplotlib.gridspec import GridSpec import os import pickle import argparse", "\"\"\" xy = tf.constant(xy) with tf.GradientTape() as g: g.watch(xy) psi_p = network(xy) psi_p_j", "m = plt.cm.ScalarMappable(cmap='rainbow', norm=Normalize(vmin=vmin, vmax=vmax)) m.set_array(z) m.set_clim(vmin, vmax) cbar = plt.colorbar(m, pad=0.03, aspect=25,", "0.01 # build a core network model network = Network().build() network.summary() # build", "vmax = 6.1e-1 vmin = -5e-1 if (title == 'u'): vmax = 1.1e+0", "plt.contourf(x, y, z, cmap='rainbow', levels=levels, vmin=vmin, vmax=vmax) plt.title(title) m = plt.cm.ScalarMappable(cmap='rainbow', norm=Normalize(vmin=vmin, vmax=vmax))", "the steady Navier-Stokes equation. \"\"\" args = parse_args() # number of training samples", "contour(gs[1, 0], x, y, u, 'u') contour(gs[1, 1], x, y, v, 'v') plt.tight_layout()", "xy: network input variables as ndarray. Returns: (u, v) as ndarray. 
\"\"\" xy", "plt.show() plt.close() fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2) contour(gs[0, 0], x,", "= [x, y, psi, p, u, v] with open(args.gt_path, 'wb') as f: pickle.dump(data,", "# compute (u, v) u, v = uv(network, xy) u = u.reshape(x.shape) v", "batch_size=len(xy)) psi, p = [ psi_p[..., i].reshape(x.shape) for i in range(psi_p.shape[-1]) ] #", "plt.subplot(grid) print(title, vmin, vmax) plt.contour(x, y, z, colors='k', linewidths=0.2, levels=levels, vmin=vmin, vmax=vmax) plt.contourf(x,", "== 'psi'): vmax = 1.2e-1 vmin = -1e-1 if (title == 'p'): vmax", "y, z, cmap='rainbow', levels=levels, vmin=vmin, vmax=vmax) plt.title(title) m = plt.cm.ScalarMappable(cmap='rainbow', norm=Normalize(vmin=vmin, vmax=vmax)) m.set_array(z)", "vmax = 1.1e+0 vmin = -2e-1 if (title == 'v'): vmax = 2.1e-1", "if (title == 'du'): vmax = 1.1e-1 vmin = 0.0 if (title ==", "axis=-1) # predict (psi, p) psi_p = network.predict(xy, batch_size=len(xy)) psi, p = [", "= uv(network, xy) u = u.reshape(x.shape) v = v.reshape(x.shape) if os.path.isfile(args.gt_path): with open(args.gt_path,", "== '__main__': \"\"\" Test the physics informed neural network (PINN) model for the", "plt.colorbar(m, pad=0.03, aspect=25, format='%.0e') cbar.mappable.set_clim(vmin, vmax) if __name__ == '__main__': \"\"\" Test the", "as f: pickle.dump(data, f) plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png')) plt.show() plt.close()", "+ '.png')) plt.show() plt.close() else: # plot test results fig = plt.figure(figsize=(6, 5))", "by the steady Navier-Stokes equation. 
\"\"\" args = parse_args() # number of training", "Network from lib.optimizer import Optimizer def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--maxiter', type=int,", "= Optimizer(model=model, x_train=x_train, y_train=y_train, dict_params=args.__dict__) optimizer.fit() # create meshgrid coordinates (x, y) for", "= -psi_p_j[..., 0, 0] return u.numpy(), v.numpy() def contour(grid, x, y, z, title,", "vmin = -2e-1 if (title == 'v'): vmax = 2.1e-1 vmin = -2e-1", "= [zeros, zeros, uv_bnd] # train the model using L-BFGS-B algorithm optimizer =", "predict (psi, p) psi_p = network.predict(xy, batch_size=len(xy)) psi, p = [ psi_p[..., i].reshape(x.shape)", "(u, v) as ndarray. \"\"\" xy = tf.constant(xy) with tf.GradientTape() as g: g.watch(xy)", "2) contour(gs[0, 0], x, y, np.abs(psi - psi_gt), 'dpsi') contour(gs[0, 1], x, y,", "x, y, u, 'u') contour(gs[1, 1], x, y, v, 'v') plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__()", "tf.constant(xy) with tf.GradientTape() as g: g.watch(xy) psi_p = network(xy) psi_p_j = g.batch_jacobian(psi_p, xy)", "linewidths=0.2, levels=levels, vmin=vmin, vmax=vmax) plt.contourf(x, y, z, cmap='rainbow', levels=levels, vmin=vmin, vmax=vmax) plt.title(title) m", "u0 * np.floor(xy_bnd[..., 1]) y_train = [zeros, zeros, uv_bnd] # train the model", "'psi'): vmax = 1.2e-1 vmin = -1e-1 if (title == 'p'): vmax =", "= 1.1e-1 vmin = 0.0 if (title == 'dv'): vmax = 8.1e-2 vmin", "Navier-Stokes equation. 
\"\"\" args = parse_args() # number of training samples num_train_samples =", "u_gt, v_gt = data fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2) contour(gs[0,", "\"\"\" Test the physics informed neural network (PINN) model for the cavity flow", "= 0.0 # plot a contour plt.subplot(grid) print(title, vmin, vmax) plt.contour(x, y, z,", "np.abs(v - v_gt), 'dv') plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '_error.png')) plt.show() plt.close()", "== 'dp'): vmax = 4.1e-1 vmin = 0.0 if (title == 'du'): vmax", "boundaries xy_ub[..., 1] = np.round(xy_ub[..., 1]) # y-position is 0 or 1 xy_lr", "levels: number of contour lines. \"\"\" # get the value range vmin =", "y = np.linspace(0, 1, num_test_samples) x, y = np.meshgrid(x, y) xy = np.stack([x.flatten(),", "z: z-array. title: title string. levels: number of contour lines. \"\"\" # get", "] # compute (u, v) u, v = uv(network, xy) u = u.reshape(x.shape)", "'--num-test-samples', type=int, default=100) parser.add_argument('-n', '--network', type=str, default='pinn') parser.add_argument('-l', '--loss', type=str, default='l2') parser.add_argument('-gi', '--gradient-interval',", "u_gt), 'du') contour(gs[1, 1], x, y, np.abs(v - v_gt), 'dv') plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__()", "network (PINN) model for the cavity flow governed by the steady Navier-Stokes equation.", "x, y, p, 'p') contour(gs[1, 0], x, y, u, 'u') contour(gs[1, 1], x,", "(title == 'psi'): vmax = 1.2e-1 vmin = -1e-1 if (title == 'p'):", "\"\"\" args = parse_args() # number of training samples num_train_samples = args.num_train_samples #", "plot a contour plt.subplot(grid) print(title, vmin, vmax) plt.contour(x, y, z, colors='k', linewidths=0.2, levels=levels,", "ndarray. 
\"\"\" xy = tf.constant(xy) with tf.GradientTape() as g: g.watch(xy) psi_p = network(xy)", "vmax=vmax) plt.contourf(x, y, z, cmap='rainbow', levels=levels, vmin=vmin, vmax=vmax) plt.title(title) m = plt.cm.ScalarMappable(cmap='rainbow', norm=Normalize(vmin=vmin,", "value range vmin = -2e-1 vmax = 2e-1 if (title == 'psi'): vmax", "vmin = -1e-1 if (title == 'p'): vmax = 6.1e-1 vmin = -5e-1", "m.set_array(z) m.set_clim(vmin, vmax) cbar = plt.colorbar(m, pad=0.03, aspect=25, format='%.0e') cbar.mappable.set_clim(vmin, vmax) if __name__", "= np.round(xy_lr[..., 0]) # x-position is 0 or 1 xy_bnd = np.random.permutation(np.concatenate([xy_ub, xy_lr]))", "u = u.reshape(x.shape) v = v.reshape(x.shape) if os.path.isfile(args.gt_path): with open(args.gt_path, 'rb') as f:", "# x-position is 0 or 1 xy_bnd = np.random.permutation(np.concatenate([xy_ub, xy_lr])) x_train = [xy_eqn,", "plt import matplotlib.cm as cm from matplotlib.colors import Normalize from matplotlib.gridspec import GridSpec", "vmax) cbar = plt.colorbar(m, pad=0.03, aspect=25, format='%.0e') cbar.mappable.set_clim(vmin, vmax) if __name__ == '__main__':", "= [xy_eqn, xy_bnd] # create training output zeros = np.zeros((num_train_samples, 2)) uv_bnd =", "num_test_samples) x, y = np.meshgrid(x, y) xy = np.stack([x.flatten(), y.flatten()], axis=-1) # predict", "parser.add_argument('-nte', '--num-test-samples', type=int, default=100) parser.add_argument('-n', '--network', type=str, default='pinn') parser.add_argument('-l', '--loss', type=str, default='l2') parser.add_argument('-gi',", "is 0 or 1 xy_bnd = np.random.permutation(np.concatenate([xy_ub, xy_lr])) x_train = [xy_eqn, xy_bnd] #", "p_gt, u_gt, v_gt = data fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2)", "args = parse_args() # number of training samples num_train_samples = args.num_train_samples # number", "# plot a contour plt.subplot(grid) print(title, vmin, vmax) plt.contour(x, y, z, colors='k', linewidths=0.2,", "1 # density rho = 1 # viscosity nu = 
0.01 # build", "if (title == 'dv'): vmax = 8.1e-2 vmin = 0.0 # plot a", "1 # viscosity nu = 0.01 # build a core network model network", "1], x, y, np.abs(p - p_gt), 'dp') contour(gs[1, 0], x, y, np.abs(u -", "with open(args.gt_path, 'rb') as f: data = pickle.load(f) x_gt, y_gt, psi_gt, p_gt, u_gt,", "= np.stack([x.flatten(), y.flatten()], axis=-1) # predict (psi, p) psi_p = network.predict(xy, batch_size=len(xy)) psi,", "as ndarray. Returns: (u, v) as ndarray. \"\"\" xy = tf.constant(xy) with tf.GradientTape()", "Network().build() network.summary() # build a PINN model model = PINN(network, rho=rho, nu=nu).build() #", "xy_eqn = np.random.rand(num_train_samples, 2) xy_ub = np.random.rand(num_train_samples//2, 2) # top-bottom boundaries xy_ub[..., 1]", "y, np.abs(u - u_gt), 'du') contour(gs[1, 1], x, y, np.abs(v - v_gt), 'dv')", "5)) gs = GridSpec(2, 2) contour(gs[0, 0], x, y, psi, 'psi') contour(gs[0, 1],", "if __name__ == '__main__': \"\"\" Test the physics informed neural network (PINN) model", "y-position is 0 or 1 xy_lr = np.random.rand(num_train_samples//2, 2) # left-right boundaries xy_lr[...,", "v] with open(args.gt_path, 'wb') as f: pickle.dump(data, f) plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time())", "6.1e-1 vmin = -5e-1 if (title == 'u'): vmax = 1.1e+0 vmin =", "# number of training samples num_train_samples = args.num_train_samples # number of test samples", "# train the model using L-BFGS-B algorithm optimizer = Optimizer(model=model, x_train=x_train, y_train=y_train, dict_params=args.__dict__)", "for i in range(psi_p.shape[-1]) ] # compute (u, v) u, v = uv(network,", "= args.num_test_samples # inlet flow velocity u0 = 1 # density rho =", "= plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2) contour(gs[0, 0], x, y, np.abs(psi -", "plt.close() else: # plot test results fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2,", "training samples num_train_samples = args.num_train_samples # number 
if __name__ == '__main__':
    """
    Test the physics informed neural network (PINN) model for the cavity
    flow governed by the steady Navier-Stokes equation.
    """
    args = parse_args()
    # number of training samples
    num_train_samples = args.num_train_samples
    # number of test samples
    num_test_samples = args.num_test_samples
    # inlet flow velocity
    u0 = 1
    # density
    rho = 1
    # viscosity
    nu = 0.01

    # build a core network model
    network = Network().build()
    network.summary()
    # build a PINN model wrapping the core network
    model = PINN(network, rho=rho, nu=nu).build()

    # create training input:
    # random collocation points in the unit square for the PDE residual
    xy_eqn = np.random.rand(num_train_samples, 2)
    # top-bottom boundaries
    xy_ub = np.random.rand(num_train_samples//2, 2)
    # y-position is 0 or 1
    xy_ub[..., 1] = np.round(xy_ub[..., 1])
    # left-right boundaries
    xy_lr = np.random.rand(num_train_samples//2, 2)
    # x-position is 0 or 1
    xy_lr[..., 0] = np.round(xy_lr[..., 0])
    xy_bnd = np.random.permutation(np.concatenate([xy_ub, xy_lr]))
    x_train = [xy_eqn, xy_bnd]

    # create training output: zero targets for the PDE residuals, and
    # boundary velocities that are zero except on the lid (y == 1),
    # where np.floor(y) == 1 selects the moving-lid speed u0
    zeros = np.zeros((num_train_samples, 2))
    uv_bnd = np.zeros((num_train_samples, 2))
    uv_bnd[..., 0] = u0 * np.floor(xy_bnd[..., 1])
    y_train = [zeros, zeros, uv_bnd]

    # train the model using L-BFGS-B algorithm
    optimizer = Optimizer(model=model, x_train=x_train, y_train=y_train, dict_params=args.__dict__)
    optimizer.fit()

    # create meshgrid coordinates (x, y) for test plots
    x = np.linspace(0, 1, num_test_samples)
    y = np.linspace(0, 1, num_test_samples)
    x, y = np.meshgrid(x, y)
    xy = np.stack([x.flatten(), y.flatten()], axis=-1)
    # predict (psi, p)
    psi_p = network.predict(xy, batch_size=len(xy))
    psi, p = [ psi_p[..., i].reshape(x.shape) for i in range(psi_p.shape[-1]) ]
    # compute (u, v)
    u, v = uv(network, xy)
    u = u.reshape(x.shape)
    v = v.reshape(x.shape)

    if os.path.isfile(args.gt_path):
        # a reference solution exists: plot absolute errors against it first
        with open(args.gt_path, 'rb') as f:
            data = pickle.load(f)
        x_gt, y_gt, psi_gt, p_gt, u_gt, v_gt = data
        fig = plt.figure(figsize=(6, 5))
        gs = GridSpec(2, 2)
        contour(gs[0, 0], x, y, np.abs(psi - psi_gt), 'dpsi')
        contour(gs[0, 1], x, y, np.abs(p - p_gt), 'dp')
        contour(gs[1, 0], x, y, np.abs(u - u_gt), 'du')
        contour(gs[1, 1], x, y, np.abs(v - v_gt), 'dv')
        plt.tight_layout()
        # NOTE(review): the filename embeds the stringified CLI values with
        # the last one sliced off (presumably gt_path) plus a timestamp;
        # the 'figures' directory must already exist — confirm both
        plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '_error.png'))
        plt.show()
        plt.close()
        # then plot the predicted fields themselves
        fig = plt.figure(figsize=(6, 5))
        gs = GridSpec(2, 2)
        contour(gs[0, 0], x, y, psi, 'psi')
        contour(gs[0, 1], x, y, p, 'p')
        contour(gs[1, 0], x, y, u, 'u')
        contour(gs[1, 1], x, y, v, 'v')
        plt.tight_layout()
        plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png'))
        plt.show()
        plt.close()
    else:
        # plot test results
        fig = plt.figure(figsize=(6, 5))
        gs = GridSpec(2, 2)
        contour(gs[0, 0], x, y, psi, 'psi')
        contour(gs[0, 1], x, y, p, 'p')
        contour(gs[1, 0], x, y, u, 'u')
        contour(gs[1, 1], x, y, v, 'v')
        # no reference file yet: persist this prediction to gt_path so the
        # next run compares against it
        data = [x, y, psi, p, u, v]
        with open(args.gt_path, 'wb') as f:
            pickle.dump(data, f)
        plt.tight_layout()
        plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png'))
        plt.show()
        plt.close()
Returns: (u, v) as", "vmin = 0.0 if (title == 'dp'): vmax = 4.1e-1 vmin = 0.0", "of test samples num_test_samples = args.num_test_samples # inlet flow velocity u0 = 1", "informed neural network (PINN) model for the cavity flow governed by the steady", "1], x, y, p, 'p') contour(gs[1, 0], x, y, u, 'u') contour(gs[1, 1],", "x, y, np.abs(v - v_gt), 'dv') plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '_error.png'))", "'--gradient-interval', type=int, default=100) parser.add_argument('--gt-path', type=str, default='data/pinn.pkl') return parser.parse_known_args()[0] def uv(network, xy): \"\"\" Compute", "v, 'v') data = [x, y, psi, p, u, v] with open(args.gt_path, 'wb')", "GridSpec import os import pickle import argparse from lib.pinn import PINN from lib.network", "vmin=vmin, vmax=vmax) plt.title(title) m = plt.cm.ScalarMappable(cmap='rainbow', norm=Normalize(vmin=vmin, vmax=vmax)) m.set_array(z) m.set_clim(vmin, vmax) cbar =", "'--network', type=str, default='pinn') parser.add_argument('-l', '--loss', type=str, default='l2') parser.add_argument('-gi', '--gradient-interval', type=int, default=100) parser.add_argument('--gt-path', type=str,", "psi, p = [ psi_p[..., i].reshape(x.shape) for i in range(psi_p.shape[-1]) ] # compute", "vmin, vmax) plt.contour(x, y, z, colors='k', linewidths=0.2, levels=levels, vmin=vmin, vmax=vmax) plt.contourf(x, y, z,", "args.num_test_samples # inlet flow velocity u0 = 1 # density rho = 1", "(title == 'v'): vmax = 2.1e-1 vmin = -2e-1 if (title == 'dpsi'):", "grid: plot position. x: x-array. y: y-array. z: z-array. title: title string. 
levels:", "= 1.1e+0 vmin = -2e-1 if (title == 'v'): vmax = 2.1e-1 vmin", "x, y, np.abs(p - p_gt), 'dp') contour(gs[1, 0], x, y, np.abs(u - u_gt),", "or 1 xy_bnd = np.random.permutation(np.concatenate([xy_ub, xy_lr])) x_train = [xy_eqn, xy_bnd] # create training", "Optimizer def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--maxiter', type=int, default=2000) parser.add_argument('-ntr', '--num-train-samples', type=int,", "y, z, colors='k', linewidths=0.2, levels=levels, vmin=vmin, vmax=vmax) plt.contourf(x, y, z, cmap='rainbow', levels=levels, vmin=vmin,", "for test plots x = np.linspace(0, 1, num_test_samples) y = np.linspace(0, 1, num_test_samples)", "vmin=vmin, vmax=vmax) plt.contourf(x, y, z, cmap='rainbow', levels=levels, vmin=vmin, vmax=vmax) plt.title(title) m = plt.cm.ScalarMappable(cmap='rainbow',", "uv(network, xy) u = u.reshape(x.shape) v = v.reshape(x.shape) if os.path.isfile(args.gt_path): with open(args.gt_path, 'rb')", "u.numpy(), v.numpy() def contour(grid, x, y, z, title, levels=50): \"\"\" Contour plot. Args:", "in range(psi_p.shape[-1]) ] # compute (u, v) u, v = uv(network, xy) u", "v = v.reshape(x.shape) if os.path.isfile(args.gt_path): with open(args.gt_path, 'rb') as f: data = pickle.load(f)", "or 1 xy_lr = np.random.rand(num_train_samples//2, 2) # left-right boundaries xy_lr[..., 0] = np.round(xy_lr[...,", "print(title, vmin, vmax) plt.contour(x, y, z, colors='k', linewidths=0.2, levels=levels, vmin=vmin, vmax=vmax) plt.contourf(x, y,", "- psi_gt), 'dpsi') contour(gs[0, 1], x, y, np.abs(p - p_gt), 'dp') contour(gs[1, 0],", "= data fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2) contour(gs[0, 0], x,", "p) psi_p = network.predict(xy, batch_size=len(xy)) psi, p = [ psi_p[..., i].reshape(x.shape) for i", "1]) y_train = [zeros, zeros, uv_bnd] # train the model using L-BFGS-B algorithm", "contour(grid, x, y, z, title, levels=50): \"\"\" Contour plot. 
Args: grid: plot position.", "import Optimizer def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--maxiter', type=int, default=2000) parser.add_argument('-ntr', '--num-train-samples',", "type=int, default=10000) parser.add_argument('-nte', '--num-test-samples', type=int, default=100) parser.add_argument('-n', '--network', type=str, default='pinn') parser.add_argument('-l', '--loss', type=str,", "0], x, y, np.abs(u - u_gt), 'du') contour(gs[1, 1], x, y, np.abs(v -", "y, p, 'p') contour(gs[1, 0], x, y, u, 'u') contour(gs[1, 1], x, y,", "range vmin = -2e-1 vmax = 2e-1 if (title == 'psi'): vmax =", "velocity u0 = 1 # density rho = 1 # viscosity nu =", "vmax=vmax) plt.title(title) m = plt.cm.ScalarMappable(cmap='rainbow', norm=Normalize(vmin=vmin, vmax=vmax)) m.set_array(z) m.set_clim(vmin, vmax) cbar = plt.colorbar(m,", "'__main__': \"\"\" Test the physics informed neural network (PINN) model for the cavity", "# get the value range vmin = -2e-1 vmax = 2e-1 if (title", "'v'): vmax = 2.1e-1 vmin = -2e-1 if (title == 'dpsi'): vmax =", "np.linspace(0, 1, num_test_samples) y = np.linspace(0, 1, num_test_samples) x, y = np.meshgrid(x, y)", "pickle.load(f) x_gt, y_gt, psi_gt, p_gt, u_gt, v_gt = data fig = plt.figure(figsize=(6, 5))", "str(time.time()) + '_error.png')) plt.show() plt.close() fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2)", "lib.tf_silent import numpy as np import tensorflow as tf import matplotlib.pyplot as plt", "'dv') plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '_error.png')) plt.show() plt.close() fig = plt.figure(figsize=(6,", "np.random.permutation(np.concatenate([xy_ub, xy_lr])) x_train = [xy_eqn, xy_bnd] # create training output zeros = np.zeros((num_train_samples,", "number of training samples num_train_samples = args.num_train_samples # number of test samples num_test_samples", "default=10000) parser.add_argument('-nte', 
'--num-test-samples', type=int, default=100) parser.add_argument('-n', '--network', type=str, default='pinn') parser.add_argument('-l', '--loss', type=str, default='l2')", "np.abs(p - p_gt), 'dp') contour(gs[1, 0], x, y, np.abs(u - u_gt), 'du') contour(gs[1,", "v.numpy() def contour(grid, x, y, z, title, levels=50): \"\"\" Contour plot. Args: grid:", "vmin = 0.0 if (title == 'dv'): vmax = 8.1e-2 vmin = 0.0", "g.batch_jacobian(psi_p, xy) u = psi_p_j[..., 0, 1] v = -psi_p_j[..., 0, 0] return", "vmax) plt.contour(x, y, z, colors='k', linewidths=0.2, levels=levels, vmin=vmin, vmax=vmax) plt.contourf(x, y, z, cmap='rainbow',", "= [ psi_p[..., i].reshape(x.shape) for i in range(psi_p.shape[-1]) ] # compute (u, v)", "build a core network model network = Network().build() network.summary() # build a PINN", "'dpsi') contour(gs[0, 1], x, y, np.abs(p - p_gt), 'dp') contour(gs[1, 0], x, y,", "+ str(time.time()) + '.png')) plt.show() plt.close() else: # plot test results fig =", "flow velocity u0 = 1 # density rho = 1 # viscosity nu", "== 'v'): vmax = 2.1e-1 vmin = -2e-1 if (title == 'dpsi'): vmax", "network with output (psi, p). Args: xy: network input variables as ndarray. Returns:", "1] = np.round(xy_ub[..., 1]) # y-position is 0 or 1 xy_lr = np.random.rand(num_train_samples//2,", "str(time.time()) + '.png')) plt.show() plt.close() else: # plot test results fig = plt.figure(figsize=(6,", "v, 'v') plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png')) plt.show() plt.close() else: #", "lib.network import Network from lib.optimizer import Optimizer def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-i',", "2.1e-1 vmin = -2e-1 if (title == 'dpsi'): vmax = 1.1e-2 vmin =", "from matplotlib.gridspec import GridSpec import os import pickle import argparse from lib.pinn import", "position. x: x-array. y: y-array. z: z-array. title: title string. 
levels: number of", "if (title == 'u'): vmax = 1.1e+0 vmin = -2e-1 if (title ==", "- u_gt), 'du') contour(gs[1, 1], x, y, np.abs(v - v_gt), 'dv') plt.tight_layout() plt.savefig(os.path.join('figures',", "u.reshape(x.shape) v = v.reshape(x.shape) if os.path.isfile(args.gt_path): with open(args.gt_path, 'rb') as f: data =", "np.floor(xy_bnd[..., 1]) y_train = [zeros, zeros, uv_bnd] # train the model using L-BFGS-B", "train the model using L-BFGS-B algorithm optimizer = Optimizer(model=model, x_train=x_train, y_train=y_train, dict_params=args.__dict__) optimizer.fit()", "create training output zeros = np.zeros((num_train_samples, 2)) uv_bnd = np.zeros((num_train_samples, 2)) uv_bnd[..., 0]", "range(psi_p.shape[-1]) ] # compute (u, v) u, v = uv(network, xy) u =", "= Network().build() network.summary() # build a PINN model model = PINN(network, rho=rho, nu=nu).build()", "5)) gs = GridSpec(2, 2) contour(gs[0, 0], x, y, np.abs(psi - psi_gt), 'dpsi')", "core network model network = Network().build() network.summary() # build a PINN model model", "= u0 * np.floor(xy_bnd[..., 1]) y_train = [zeros, zeros, uv_bnd] # train the", "fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2) contour(gs[0, 0], x, y, np.abs(psi", "np.linspace(0, 1, num_test_samples) x, y = np.meshgrid(x, y) xy = np.stack([x.flatten(), y.flatten()], axis=-1)", "0.0 # plot a contour plt.subplot(grid) print(title, vmin, vmax) plt.contour(x, y, z, colors='k',", "0.0 if (title == 'dp'): vmax = 4.1e-1 vmin = 0.0 if (title", "y_gt, psi_gt, p_gt, u_gt, v_gt = data fig = plt.figure(figsize=(6, 5)) gs =", "= np.random.rand(num_train_samples, 2) xy_ub = np.random.rand(num_train_samples//2, 2) # top-bottom boundaries xy_ub[..., 1] =", "using L-BFGS-B algorithm optimizer = Optimizer(model=model, x_train=x_train, y_train=y_train, dict_params=args.__dict__) optimizer.fit() # create meshgrid", "type=str, default='l2') parser.add_argument('-gi', '--gradient-interval', type=int, default=100) parser.add_argument('--gt-path', 
type=str, default='data/pinn.pkl') return parser.parse_known_args()[0] def uv(network,", "num_test_samples) y = np.linspace(0, 1, num_test_samples) x, y = np.meshgrid(x, y) xy =", "y, psi, p, u, v] with open(args.gt_path, 'wb') as f: pickle.dump(data, f) plt.tight_layout()", "np.abs(u - u_gt), 'du') contour(gs[1, 1], x, y, np.abs(v - v_gt), 'dv') plt.tight_layout()", "i in range(psi_p.shape[-1]) ] # compute (u, v) u, v = uv(network, xy)", "import pickle import argparse from lib.pinn import PINN from lib.network import Network from", "- p_gt), 'dp') contour(gs[1, 0], x, y, np.abs(u - u_gt), 'du') contour(gs[1, 1],", "with open(args.gt_path, 'wb') as f: pickle.dump(data, f) plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) +", "0], x, y, psi, 'psi') contour(gs[0, 1], x, y, p, 'p') contour(gs[1, 0],", "Test the physics informed neural network (PINN) model for the cavity flow governed", "4.1e-1 vmin = 0.0 if (title == 'du'): vmax = 1.1e-1 vmin =", "x, y, v, 'v') data = [x, y, psi, p, u, v] with", "Optimizer(model=model, x_train=x_train, y_train=y_train, dict_params=args.__dict__) optimizer.fit() # create meshgrid coordinates (x, y) for test", "data = pickle.load(f) x_gt, y_gt, psi_gt, p_gt, u_gt, v_gt = data fig =", "pickle import argparse from lib.pinn import PINN from lib.network import Network from lib.optimizer", "psi_p[..., i].reshape(x.shape) for i in range(psi_p.shape[-1]) ] # compute (u, v) u, v", "parser.add_argument('-ntr', '--num-train-samples', type=int, default=10000) parser.add_argument('-nte', '--num-test-samples', type=int, default=100) parser.add_argument('-n', '--network', type=str, default='pinn') parser.add_argument('-l',", "y_train=y_train, dict_params=args.__dict__) optimizer.fit() # create meshgrid coordinates (x, y) for test plots x", "pdb import time import lib.tf_silent import numpy as np import tensorflow as tf", "'--loss', type=str, default='l2') parser.add_argument('-gi', 
'--gradient-interval', type=int, default=100) parser.add_argument('--gt-path', type=str, default='data/pinn.pkl') return parser.parse_known_args()[0] def", "model = PINN(network, rho=rho, nu=nu).build() # create training input xy_eqn = np.random.rand(num_train_samples, 2)", "default='l2') parser.add_argument('-gi', '--gradient-interval', type=int, default=100) parser.add_argument('--gt-path', type=str, default='data/pinn.pkl') return parser.parse_known_args()[0] def uv(network, xy):", "physics informed neural network (PINN) model for the cavity flow governed by the", "= np.linspace(0, 1, num_test_samples) x, y = np.meshgrid(x, y) xy = np.stack([x.flatten(), y.flatten()],", "compute (u, v) u, v = uv(network, xy) u = u.reshape(x.shape) v =", "0] = u0 * np.floor(xy_bnd[..., 1]) y_train = [zeros, zeros, uv_bnd] # train", "from matplotlib.colors import Normalize from matplotlib.gridspec import GridSpec import os import pickle import", "z, cmap='rainbow', levels=levels, vmin=vmin, vmax=vmax) plt.title(title) m = plt.cm.ScalarMappable(cmap='rainbow', norm=Normalize(vmin=vmin, vmax=vmax)) m.set_array(z) m.set_clim(vmin,", "1.1e+0 vmin = -2e-1 if (title == 'v'): vmax = 2.1e-1 vmin =", "as plt import matplotlib.cm as cm from matplotlib.colors import Normalize from matplotlib.gridspec import", "\"\"\" Contour plot. Args: grid: plot position. x: x-array. y: y-array. z: z-array.", "plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '_error.png')) plt.show() plt.close() fig = plt.figure(figsize=(6, 5))", "= -2e-1 if (title == 'v'): vmax = 2.1e-1 vmin = -2e-1 if", "list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png')) plt.show() plt.close() else: # plot test results fig", "(u, v) for the network with output (psi, p). 
Args: xy: network input", "test results fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2) contour(gs[0, 0], x,", "density rho = 1 # viscosity nu = 0.01 # build a core", "as ndarray. \"\"\" xy = tf.constant(xy) with tf.GradientTape() as g: g.watch(xy) psi_p =", "psi_p = network.predict(xy, batch_size=len(xy)) psi, p = [ psi_p[..., i].reshape(x.shape) for i in", "plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png')) plt.show() plt.close() else: # plot test results", "contour(gs[1, 0], x, y, u, 'u') contour(gs[1, 1], x, y, v, 'v') data", "the network with output (psi, p). Args: xy: network input variables as ndarray.", "xy_ub = np.random.rand(num_train_samples//2, 2) # top-bottom boundaries xy_ub[..., 1] = np.round(xy_ub[..., 1]) #", "build a PINN model model = PINN(network, rho=rho, nu=nu).build() # create training input", "xy_lr])) x_train = [xy_eqn, xy_bnd] # create training output zeros = np.zeros((num_train_samples, 2))", "default=2000) parser.add_argument('-ntr', '--num-train-samples', type=int, default=10000) parser.add_argument('-nte', '--num-test-samples', type=int, default=100) parser.add_argument('-n', '--network', type=str, default='pinn')", "zeros, uv_bnd] # train the model using L-BFGS-B algorithm optimizer = Optimizer(model=model, x_train=x_train,", "(u, v) u, v = uv(network, xy) u = u.reshape(x.shape) v = v.reshape(x.shape)", "network(xy) psi_p_j = g.batch_jacobian(psi_p, xy) u = psi_p_j[..., 0, 1] v = -psi_p_j[...,", "v = -psi_p_j[..., 0, 0] return u.numpy(), v.numpy() def contour(grid, x, y, z,", "psi_p_j = g.batch_jacobian(psi_p, xy) u = psi_p_j[..., 0, 1] v = -psi_p_j[..., 0,", "= 2.1e-1 vmin = -2e-1 if (title == 'dpsi'): vmax = 1.1e-2 vmin", "vmin = -2e-1 if (title == 'dpsi'): vmax = 1.1e-2 vmin = 0.0", "for the cavity flow governed by the steady Navier-Stokes equation. 
\"\"\" args =", "* np.floor(xy_bnd[..., 1]) y_train = [zeros, zeros, uv_bnd] # train the model using", "'dp'): vmax = 4.1e-1 vmin = 0.0 if (title == 'du'): vmax =", "import Normalize from matplotlib.gridspec import GridSpec import os import pickle import argparse from", "y, np.abs(psi - psi_gt), 'dpsi') contour(gs[0, 1], x, y, np.abs(p - p_gt), 'dp')", "as f: data = pickle.load(f) x_gt, y_gt, psi_gt, p_gt, u_gt, v_gt = data", "v_gt = data fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2) contour(gs[0, 0],", "plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2) contour(gs[0, 0], x, y, np.abs(psi - psi_gt),", "optimizer = Optimizer(model=model, x_train=x_train, y_train=y_train, dict_params=args.__dict__) optimizer.fit() # create meshgrid coordinates (x, y)", "'.png')) plt.show() plt.close() else: # plot test results fig = plt.figure(figsize=(6, 5)) gs", "'v') data = [x, y, psi, p, u, v] with open(args.gt_path, 'wb') as", "colors='k', linewidths=0.2, levels=levels, vmin=vmin, vmax=vmax) plt.contourf(x, y, z, cmap='rainbow', levels=levels, vmin=vmin, vmax=vmax) plt.title(title)", "xy_ub[..., 1] = np.round(xy_ub[..., 1]) # y-position is 0 or 1 xy_lr =", "'rb') as f: data = pickle.load(f) x_gt, y_gt, psi_gt, p_gt, u_gt, v_gt =", "v.reshape(x.shape) if os.path.isfile(args.gt_path): with open(args.gt_path, 'rb') as f: data = pickle.load(f) x_gt, y_gt,", "v) for the network with output (psi, p). Args: xy: network input variables", "PINN model model = PINN(network, rho=rho, nu=nu).build() # create training input xy_eqn =", "GridSpec(2, 2) contour(gs[0, 0], x, y, psi, 'psi') contour(gs[0, 1], x, y, p,", "with tf.GradientTape() as g: g.watch(xy) psi_p = network(xy) psi_p_j = g.batch_jacobian(psi_p, xy) u", "x, y, psi, 'psi') contour(gs[0, 1], x, y, p, 'p') contour(gs[1, 0], x,", "plt.contour(x, y, z, colors='k', linewidths=0.2, levels=levels, vmin=vmin, vmax=vmax) plt.contourf(x, y, z, cmap='rainbow', levels=levels,", "x: x-array. y: y-array. z: z-array. title: title string. 
levels: number of contour", "-2e-1 if (title == 'dpsi'): vmax = 1.1e-2 vmin = 0.0 if (title", "top-bottom boundaries xy_ub[..., 1] = np.round(xy_ub[..., 1]) # y-position is 0 or 1", "np.stack([x.flatten(), y.flatten()], axis=-1) # predict (psi, p) psi_p = network.predict(xy, batch_size=len(xy)) psi, p", "if os.path.isfile(args.gt_path): with open(args.gt_path, 'rb') as f: data = pickle.load(f) x_gt, y_gt, psi_gt,", "vmin = -5e-1 if (title == 'u'): vmax = 1.1e+0 vmin = -2e-1", "network.predict(xy, batch_size=len(xy)) psi, p = [ psi_p[..., i].reshape(x.shape) for i in range(psi_p.shape[-1]) ]", "parser.parse_known_args()[0] def uv(network, xy): \"\"\" Compute flow velocities (u, v) for the network", "= 0.01 # build a core network model network = Network().build() network.summary() #", "u, 'u') contour(gs[1, 1], x, y, v, 'v') data = [x, y, psi,", "g.watch(xy) psi_p = network(xy) psi_p_j = g.batch_jacobian(psi_p, xy) u = psi_p_j[..., 0, 1]", "np.zeros((num_train_samples, 2)) uv_bnd = np.zeros((num_train_samples, 2)) uv_bnd[..., 0] = u0 * np.floor(xy_bnd[..., 1])", "vmax=vmax)) m.set_array(z) m.set_clim(vmin, vmax) cbar = plt.colorbar(m, pad=0.03, aspect=25, format='%.0e') cbar.mappable.set_clim(vmin, vmax) if", "psi_p_j[..., 0, 1] v = -psi_p_j[..., 0, 0] return u.numpy(), v.numpy() def contour(grid,", "title: title string. levels: number of contour lines. \"\"\" # get the value", "viscosity nu = 0.01 # build a core network model network = Network().build()", "= u.reshape(x.shape) v = v.reshape(x.shape) if os.path.isfile(args.gt_path): with open(args.gt_path, 'rb') as f: data", "tensorflow as tf import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.colors", "x, y, np.abs(psi - psi_gt), 'dpsi') contour(gs[0, 1], x, y, np.abs(p - p_gt),", "x, y, z, title, levels=50): \"\"\" Contour plot. Args: grid: plot position. 
x:", "left-right boundaries xy_lr[..., 0] = np.round(xy_lr[..., 0]) # x-position is 0 or 1", "uv(network, xy): \"\"\" Compute flow velocities (u, v) for the network with output", "i].reshape(x.shape) for i in range(psi_p.shape[-1]) ] # compute (u, v) u, v =", "plt.show() plt.close() else: # plot test results fig = plt.figure(figsize=(6, 5)) gs =", "-2e-1 if (title == 'v'): vmax = 2.1e-1 vmin = -2e-1 if (title", "p, 'p') contour(gs[1, 0], x, y, u, 'u') contour(gs[1, 1], x, y, v,", "np.meshgrid(x, y) xy = np.stack([x.flatten(), y.flatten()], axis=-1) # predict (psi, p) psi_p =", "def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--maxiter', type=int, default=2000) parser.add_argument('-ntr', '--num-train-samples', type=int, default=10000)", "matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.colors import Normalize from matplotlib.gridspec", "x_gt, y_gt, psi_gt, p_gt, u_gt, v_gt = data fig = plt.figure(figsize=(6, 5)) gs", "np import tensorflow as tf import matplotlib.pyplot as plt import matplotlib.cm as cm", "equation. 
\"\"\" args = parse_args() # number of training samples num_train_samples = args.num_train_samples", "# build a core network model network = Network().build() network.summary() # build a", "contour(gs[1, 1], x, y, v, 'v') plt.tight_layout() plt.savefig(os.path.join('figures', list(args.__dict__.values())[:-1].__str__() + str(time.time()) + '.png'))", "argparse.ArgumentParser() parser.add_argument('-i', '--maxiter', type=int, default=2000) parser.add_argument('-ntr', '--num-train-samples', type=int, default=10000) parser.add_argument('-nte', '--num-test-samples', type=int, default=100)", "1], x, y, v, 'v') data = [x, y, psi, p, u, v]", "if (title == 'dp'): vmax = 4.1e-1 vmin = 0.0 if (title ==", "training output zeros = np.zeros((num_train_samples, 2)) uv_bnd = np.zeros((num_train_samples, 2)) uv_bnd[..., 0] =", "model network = Network().build() network.summary() # build a PINN model model = PINN(network,", "rho = 1 # viscosity nu = 0.01 # build a core network", "= np.linspace(0, 1, num_test_samples) y = np.linspace(0, 1, num_test_samples) x, y = np.meshgrid(x,", "= psi_p_j[..., 0, 1] v = -psi_p_j[..., 0, 0] return u.numpy(), v.numpy() def", "data = [x, y, psi, p, u, v] with open(args.gt_path, 'wb') as f:", "xy): \"\"\" Compute flow velocities (u, v) for the network with output (psi,", "levels=50): \"\"\" Contour plot. Args: grid: plot position. x: x-array. y: y-array. z:", "samples num_train_samples = args.num_train_samples # number of test samples num_test_samples = args.num_test_samples #", "Args: grid: plot position. x: x-array. y: y-array. z: z-array. 
title: title string.", "= 1 # viscosity nu = 0.01 # build a core network model", "np.zeros((num_train_samples, 2)) uv_bnd[..., 0] = u0 * np.floor(xy_bnd[..., 1]) y_train = [zeros, zeros,", "matplotlib.gridspec import GridSpec import os import pickle import argparse from lib.pinn import PINN", "vmax = 1.2e-1 vmin = -1e-1 if (title == 'p'): vmax = 6.1e-1", "v = uv(network, xy) u = u.reshape(x.shape) v = v.reshape(x.shape) if os.path.isfile(args.gt_path): with", "= argparse.ArgumentParser() parser.add_argument('-i', '--maxiter', type=int, default=2000) parser.add_argument('-ntr', '--num-train-samples', type=int, default=10000) parser.add_argument('-nte', '--num-test-samples', type=int,", "xy_lr[..., 0] = np.round(xy_lr[..., 0]) # x-position is 0 or 1 xy_bnd =", "plt.title(title) m = plt.cm.ScalarMappable(cmap='rainbow', norm=Normalize(vmin=vmin, vmax=vmax)) m.set_array(z) m.set_clim(vmin, vmax) cbar = plt.colorbar(m, pad=0.03,", "[x, y, psi, p, u, v] with open(args.gt_path, 'wb') as f: pickle.dump(data, f)", "model using L-BFGS-B algorithm optimizer = Optimizer(model=model, x_train=x_train, y_train=y_train, dict_params=args.__dict__) optimizer.fit() # create", "if (title == 'v'): vmax = 2.1e-1 vmin = -2e-1 if (title ==", "boundaries xy_lr[..., 0] = np.round(xy_lr[..., 0]) # x-position is 0 or 1 xy_bnd", "0], x, y, np.abs(psi - psi_gt), 'dpsi') contour(gs[0, 1], x, y, np.abs(p -", "'u') contour(gs[1, 1], x, y, v, 'v') data = [x, y, psi, p,", "import lib.tf_silent import numpy as np import tensorflow as tf import matplotlib.pyplot as", "test plots x = np.linspace(0, 1, num_test_samples) y = np.linspace(0, 1, num_test_samples) x,", "vmax = 2.1e-1 vmin = -2e-1 if (title == 'dpsi'): vmax = 1.1e-2", "contour plt.subplot(grid) print(title, vmin, vmax) plt.contour(x, y, z, colors='k', linewidths=0.2, levels=levels, vmin=vmin, vmax=vmax)", "2) contour(gs[0, 0], x, y, psi, 'psi') contour(gs[0, 1], x, y, p, 'p')", "= np.zeros((num_train_samples, 2)) uv_bnd[..., 0] = u0 * 
np.floor(xy_bnd[..., 1]) y_train = [zeros,", "get the value range vmin = -2e-1 vmax = 2e-1 if (title ==", "else: # plot test results fig = plt.figure(figsize=(6, 5)) gs = GridSpec(2, 2)", "network = Network().build() network.summary() # build a PINN model model = PINN(network, rho=rho,", "2e-1 if (title == 'psi'): vmax = 1.2e-1 vmin = -1e-1 if (title", "x = np.linspace(0, 1, num_test_samples) y = np.linspace(0, 1, num_test_samples) x, y =", "= g.batch_jacobian(psi_p, xy) u = psi_p_j[..., 0, 1] v = -psi_p_j[..., 0, 0]", "= 1 # density rho = 1 # viscosity nu = 0.01 #", "contour(gs[0, 0], x, y, psi, 'psi') contour(gs[0, 1], x, y, p, 'p') contour(gs[1,", "optimizer.fit() # create meshgrid coordinates (x, y) for test plots x = np.linspace(0,", "= v.reshape(x.shape) if os.path.isfile(args.gt_path): with open(args.gt_path, 'rb') as f: data = pickle.load(f) x_gt,", "a core network model network = Network().build() network.summary() # build a PINN model", "network.summary() # build a PINN model model = PINN(network, rho=rho, nu=nu).build() # create", "y, v, 'v') data = [x, y, psi, p, u, v] with open(args.gt_path,", "2) # top-bottom boundaries xy_ub[..., 1] = np.round(xy_ub[..., 1]) # y-position is 0", "numpy as np import tensorflow as tf import matplotlib.pyplot as plt import matplotlib.cm", "= tf.constant(xy) with tf.GradientTape() as g: g.watch(xy) psi_p = network(xy) psi_p_j = g.batch_jacobian(psi_p,", "network input variables as ndarray. Returns: (u, v) as ndarray. \"\"\" xy =", "xy = np.stack([x.flatten(), y.flatten()], axis=-1) # predict (psi, p) psi_p = network.predict(xy, batch_size=len(xy))", "as cm from matplotlib.colors import Normalize from matplotlib.gridspec import GridSpec import os import", "type=int, default=100) parser.add_argument('--gt-path', type=str, default='data/pinn.pkl') return parser.parse_known_args()[0] def uv(network, xy): \"\"\" Compute flow", "ndarray. Returns: (u, v) as ndarray. 
\"\"\" xy = tf.constant(xy) with tf.GradientTape() as", "vmin = 0.0 # plot a contour plt.subplot(grid) print(title, vmin, vmax) plt.contour(x, y,", "-1e-1 if (title == 'p'): vmax = 6.1e-1 vmin = -5e-1 if (title", "string. levels: number of contour lines. \"\"\" # get the value range vmin", "= np.round(xy_ub[..., 1]) # y-position is 0 or 1 xy_lr = np.random.rand(num_train_samples//2, 2)" ]
[ "error_train, error_test =\\ errors_regression(y, y_test[(N_test/2):], y_hat_train, y_hat_test[(N_test/2):]) list_kernel_error_validation.append(error_validation) else: error_train, error_test = errors_regression(y,", "be evaluated :param mondrian_forest: flag indicating whether mondrian forest should be evaluated :param", "squared errors trees_y_hat_test = np.zeros((N_test, M)) list_forest_error_train = [] list_forest_error_test = [] if", "containing all results \"\"\" N, D = np.shape(X) N_test = np.shape(X_test)[0] X_all =", "1) y_hat_test = y_mean + np.mean(trees_y_hat_test, 1) error_train, error_test = errors_regression(y, y_test, y_hat_train,", "+ 1) # move datapoints from split feature to child features Z_all.indices[feature_l *", "np.linalg.solve(np.transpose(Z_train).dot(Z_train) + delta / M * np.identity(len(active_features_in_tree[m])), np.transpose(Z_train).dot(y_train)) if weights_from_lifetime is not None", "a validation set should be created by halving the test set :param mondrian_kernel:", "* N_all + 1, M) indices = range(M) * N_all data = np.ones(N_all", "results = {'times': list_times, 'runtimes': list_runtime, 'Z': Z_all, 'feature_from_repetition': np.array(feature_from_repetition)} if mondrian_forest: if", "cut_time_r, dim_r, loc_r = sample_cut(lX_r, uX_r, birth_time) # add new cuts to heap", "import sample_cut, errors_regression def evaluate_all_lifetimes(X, y, X_test, y_test, M, lifetime_max, delta, validation=False, mondrian_kernel=False,", "and birth_time <= weights_from_lifetime: w_kernel_save = np.array(w_kernel[active_features]) y_hat_train = y_mean + Z_train.dot(w_kernel) y_hat_test", "'feature_from_repetition': np.array(feature_from_repetition)} if mondrian_forest: if weights_from_lifetime is not None: results['w_forest'] = np.concatenate(w_trees) results['w_kernel']", "error_test = errors_regression(y, y_test, y_hat_train, y_hat_test) list_forest_error_train.append(error_train) list_forest_error_test.append(error_test) # update 
Mondrian kernel predictions", "uX_l, birth_time) lX_r = np.min(X_all[feature_r, :], axis=0) uX_r = np.max(X_all[feature_r, :], axis=0) cut_time_r,", "np.min(X_all[feature_r, :], axis=0) uX_r = np.max(X_all[feature_r, :], axis=0) cut_time_r, dim_r, loc_r = sample_cut(lX_r,", "test error = %.3f)\" % (birth_time, lifetime_max, C, error_test)) sys.stdout.flush() if mondrian_kernel: sys.stdout.write(\"\\n\")", "N_all = N + N_test if mondrian_forest or mondrian_kernel: y = np.squeeze(y) y_test", "np.empty((0, X.shape[1])), None, M, lifetime, None) Z = np.sqrt(M) * res['Z'] # undo", "- y_mean # start timer time_start = time.clock() # initialize sparse feature matrix", "w_kernel_save = np.zeros(M) list_kernel_error_train = [] if validation: list_kernel_error_validation = [] list_kernel_error_test =", "if mondrian_kernel: w_kernel = np.append(w_kernel, [w_kernel[c], w_kernel[c]]) w_kernel[c] = 0 Z_train = Z_all[:N]", ":param mondrian_forest: flag indicating whether mondrian forest should be evaluated :param weights_from_lifetime: lifetime", "= [] if mondrian_forest: w_trees = [np.zeros(1) for _ in range(M)] trees_y_hat_train =", "feature being split events = [] active_features = [] active_features_in_tree = [[] for", "Z_test = Z_all[N:, active_features_in_tree[m]] w_tree = np.linalg.solve(np.transpose(Z_train).dot(Z_train) + delta / M * np.identity(len(active_features_in_tree[m])),", "None and birth_time <= weights_from_lifetime: w_kernel_save = np.array(w_kernel[active_features]) y_hat_train = y_mean + Z_train.dot(w_kernel)", "Z_all, 'feature_from_repetition': np.array(feature_from_repetition)} if mondrian_forest: if weights_from_lifetime is not None: results['w_forest'] = np.concatenate(w_trees)", "[] if mondrian_forest: w_trees = [np.zeros(1) for _ in range(M)] trees_y_hat_train = np.zeros((N,", "copy=False) # sample the cut for each child lX_l = np.min(X_all[feature_l, :], axis=0)", "error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test) 
list_kernel_error_train.append(error_train) list_kernel_error_test.append(error_test) # save runtime list_runtime.append(time.clock()", "through Mondrian kernels with all lifetime in [0, lifetime_max]. This can be used", "M) / np.sqrt(M) Z_all = scipy.sparse.csr_matrix((data, indices, indptr), shape=(N_all, M)) feature_from_repetition = range(M)", "[] if mondrian_kernel: w_kernel = np.zeros(M) w_kernel_save = np.zeros(M) list_kernel_error_train = [] if", "M + m] = C + 0 Z_all.indices[feature_r * M + m] =", "\"\"\" N, D = np.shape(X) N_test = np.shape(X_test)[0] X_all = np.array(np.r_[X, X_test]) N_all", "X_all = np.array(np.r_[X, X_test]) N_all = N + N_test if mondrian_forest or mondrian_kernel:", "active_features_in_tree[m].append(C + 0) active_features_in_tree[m].append(C + 1) # move datapoints from split feature to", "Z_train.dot(w_kernel) y_hat_test = y_mean + Z_test.dot(w_kernel) if validation: error_train, error_validation =\\ errors_regression(y, y_test[:(N_test/2)],", "all lifetime in [0, lifetime_max]. 
This can be used to (1) construct a", "X_all[feature_data[c], dim] feature_l = (feature_data[c])[Xd <= loc] feature_r = (feature_data[c])[Xd > loc] feature_data.append(feature_l)", "loc_l)) if cut_time_r < lifetime_max: heapq.heappush(events, (cut_time_r, m, C + 1, dim_r, loc_r))", "kernel width), or to (3) compare Mondrian kernel to Mondrian forest across lifetimes.", "= np.min(X_all[feature_r, :], axis=0) uX_r = np.max(X_all[feature_r, :], axis=0) cut_time_r, dim_r, loc_r =", "bounding box for all datapoints used to sample first cut in each tree", "y - y_mean # start timer time_start = time.clock() # initialize sparse feature", "be saved :return: dictionary res containing all results \"\"\" N, D = np.shape(X)", "all results \"\"\" N, D = np.shape(X) N_test = np.shape(X_test)[0] X_all = np.array(np.r_[X,", "np.max(X_all[feature_r, :], axis=0) cut_time_r, dim_r, loc_r = sample_cut(lX_r, uX_r, birth_time) # add new", "'Z': Z_all, 'feature_from_repetition': np.array(feature_from_repetition)} if mondrian_forest: if weights_from_lifetime is not None: results['w_forest'] =", "= evaluate_all_lifetimes(X, None, np.empty((0, X.shape[1])), None, M, lifetime, None) Z = np.sqrt(M) *", "trees :param lifetime_max: terminal lifetime :param delta: ridge regression regularization hyperparameter :param validation:", "dim, loc), where feature is the index of feature being split events =", "'runtimes': list_runtime, 'Z': Z_all, 'feature_from_repetition': np.array(feature_from_repetition)} if mondrian_forest: if weights_from_lifetime is not None:", "np.mean(y) y_train = y - y_mean # start timer time_start = time.clock() #", "SGD_epochs = 1 clf = linear_model.SGDRegressor(alpha=delta, fit_intercept=False, n_iter=SGD_epochs) clf.fit(Z_train, y_train, coef_init=w_kernel) w_kernel =", "= 0 Z_train = Z_all[:N] Z_test = Z_all[N:] SGD_epochs = 1 clf =", "in increasing order list_times = [] list_runtime = [] if mondrian_forest: w_trees =", "mondrian_kernel: y = np.squeeze(y) y_test = 
np.squeeze(y_test) # subtract target means y_mean =", "time_start = time.clock() # initialize sparse feature matrix indptr = range(0, M *", "all values of interest stored in it results = {'times': list_times, 'runtimes': list_runtime,", "validation=False, mondrian_kernel=False, mondrian_forest=False, weights_from_lifetime=None): \"\"\" Sweeps through Mondrian kernels with all lifetime in", "update Mondrian kernel predictions if mondrian_kernel: w_kernel = np.append(w_kernel, [w_kernel[c], w_kernel[c]]) w_kernel[c] =", "indices, indptr), shape=(N_all, M)) feature_from_repetition = range(M) C = M # bounding box", "to (3) compare Mondrian kernel to Mondrian forest across lifetimes. :param X: training", "% (birth_time, lifetime_max, C, error_test)) sys.stdout.flush() if mondrian_kernel: sys.stdout.write(\"\\n\") # this function returns", "= [] while len(events) > 0: (birth_time, m, c, dim, loc) = heapq.heappop(events)", "active_features_in_tree[m]] w_tree = np.linalg.solve(np.transpose(Z_train).dot(Z_train) + delta / M * np.identity(len(active_features_in_tree[m])), np.transpose(Z_train).dot(y_train)) if weights_from_lifetime", "y_hat_train, y_hat_test[(N_test/2):]) list_kernel_error_validation.append(error_validation) else: error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test) list_kernel_error_train.append(error_train) list_kernel_error_test.append(error_test)", "tree, feature, dim, loc), where feature is the index of feature being split", "lX_r = np.min(X_all[feature_r, :], axis=0) uX_r = np.max(X_all[feature_r, :], axis=0) cut_time_r, dim_r, loc_r", "0) uX = np.max(X_all, 0) # event = tuple (time, tree, feature, dim,", "lX_l = np.min(X_all[feature_l, :], axis=0) uX_l = np.max(X_all[feature_l, :], axis=0) cut_time_l, dim_l, loc_l", "[] list_forest_error_test = [] if mondrian_kernel: w_kernel = np.zeros(M) w_kernel_save = np.zeros(M) list_kernel_error_train", "set should be created by halving the test set :param mondrian_kernel: flag 
indicating", "loc = sample_cut(lX, uX, 0.0) if cut_time < lifetime_max: heapq.heappush(events, (cut_time, m, m,", "error_test = errors_regression(y, y_test, y_hat_train, y_hat_test) list_kernel_error_train.append(error_train) list_kernel_error_test.append(error_test) # save runtime list_runtime.append(time.clock() -", "= errors_regression(y, y_test, y_hat_train, y_hat_test) list_forest_error_train.append(error_train) list_forest_error_test.append(error_test) # update Mondrian kernel predictions if", "= list_forest_error_train results['forest_test'] = list_forest_error_test if mondrian_kernel: results['kernel_train'] = list_kernel_error_train results['kernel_test'] = list_kernel_error_test", "to (2) find a suitable lifetime (inverse kernel width), or to (3) compare", "loc_l = sample_cut(lX_l, uX_l, birth_time) lX_r = np.min(X_all[feature_r, :], axis=0) uX_r = np.max(X_all[feature_r,", "/ np.sqrt(M) Z_all = scipy.sparse.csr_matrix((data, indices, indptr), shape=(N_all, M)) feature_from_repetition = range(M) C", "dim_l, loc_l = sample_cut(lX_l, uX_l, birth_time) lX_r = np.min(X_all[feature_r, :], axis=0) uX_r =", "list_runtime = [] if mondrian_forest: w_trees = [np.zeros(1) for _ in range(M)] trees_y_hat_train", "= linear_model.SGDRegressor(alpha=delta, fit_intercept=False, n_iter=SGD_epochs) clf.fit(Z_train, y_train, coef_init=w_kernel) w_kernel = clf.coef_ if weights_from_lifetime is", "> 0: (birth_time, m, c, dim, loc) = heapq.heappop(events) list_times.append(birth_time) # construct new", "mondrian_forest: # update Mondrian forest predictions in tree m Z_train = Z_all[:N, active_features_in_tree[m]]", "sys.stdout.write(\"\\n\") # this function returns a dictionary with all values of interest stored", "sample first cut in each tree feature_data = [np.array(range(N_all)) for _ in range(M)]", "each tree feature_data = [np.array(range(N_all)) for _ in range(M)] lX = np.min(X_all, 0)", "[] active_features_in_tree = [[] for _ in range(M)] for m in range(M): cut_time,", 
"= np.zeros(M) list_kernel_error_train = [] if validation: list_kernel_error_validation = [] list_kernel_error_test = []", "with lifetime lifetime_max, to (2) find a suitable lifetime (inverse kernel width), or", "if mondrian_kernel: w_kernel = np.zeros(M) w_kernel_save = np.zeros(M) list_kernel_error_train = [] if validation:", "number of Mondrian trees :param lifetime_max: terminal lifetime :param delta: ridge regression regularization", "(inverse kernel width), or to (3) compare Mondrian kernel to Mondrian forest across", "weights_from_lifetime=None): \"\"\" Sweeps through Mondrian kernels with all lifetime in [0, lifetime_max]. This", "= X_all[feature_data[c], dim] feature_l = (feature_data[c])[Xd <= loc] feature_r = (feature_data[c])[Xd > loc]", "if mondrian_kernel: # progress indicator in console sys.stdout.write(\"\\rTime: %.2E / %.2E (C =", "np.concatenate(w_trees) results['w_kernel'] = w_kernel_save results['forest_train'] = list_forest_error_train results['forest_test'] = list_forest_error_test if mondrian_kernel: results['kernel_train']", "M, lifetime, None) Z = np.sqrt(M) * res['Z'] # undo normalization return Z,", "in range(M)] for m in range(M): cut_time, dim, loc = sample_cut(lX, uX, 0.0)", "if cut_time < lifetime_max: heapq.heappush(events, (cut_time, m, m, dim, loc)) active_features.append(m) active_features_in_tree[m].append(m) #", "feature Xd = X_all[feature_data[c], dim] feature_l = (feature_data[c])[Xd <= loc] feature_r = (feature_data[c])[Xd", "list_times.append(birth_time) # construct new feature Xd = X_all[feature_data[c], dim] feature_l = (feature_data[c])[Xd <=", "= range(M) * N_all data = np.ones(N_all * M) / np.sqrt(M) Z_all =", "it results = {'times': list_times, 'runtimes': list_runtime, 'Z': Z_all, 'feature_from_repetition': np.array(feature_from_repetition)} if mondrian_forest:", "kernel predictions if mondrian_kernel: w_kernel = np.append(w_kernel, [w_kernel[c], w_kernel[c]]) w_kernel[c] = 0 Z_train", "None, np.empty((0, 
X.shape[1])), None, M, lifetime, None) Z = np.sqrt(M) * res['Z'] #", "< lifetime_max: heapq.heappush(events, (cut_time, m, m, dim, loc)) active_features.append(m) active_features_in_tree[m].append(m) # iterate through", "D = np.shape(X) N_test = np.shape(X_test)[0] X_all = np.array(np.r_[X, X_test]) N_all = N", "loc) = heapq.heappop(events) list_times.append(birth_time) # construct new feature Xd = X_all[feature_data[c], dim] feature_l", "m in range(M): cut_time, dim, loc = sample_cut(lX, uX, 0.0) if cut_time <", "in range(M)] lX = np.min(X_all, 0) uX = np.max(X_all, 0) # event =", "results['forest_train'] = list_forest_error_train results['forest_test'] = list_forest_error_test if mondrian_kernel: results['kernel_train'] = list_kernel_error_train results['kernel_test'] =", "whether mondrian kernel should be evaluated :param mondrian_forest: flag indicating whether mondrian forest", "[np.zeros(1) for _ in range(M)] trees_y_hat_train = np.zeros((N, M)) # initialize Mondrian tree", "hyperparameter :param validation: flag indicating whether a validation set should be created by", "= range(M) C = M # bounding box for all datapoints used to", "test regression targets :param M: number of Mondrian trees :param lifetime_max: terminal lifetime", "interest stored in it results = {'times': list_times, 'runtimes': list_runtime, 'Z': Z_all, 'feature_from_repetition':", "errors_regression def evaluate_all_lifetimes(X, y, X_test, y_test, M, lifetime_max, delta, validation=False, mondrian_kernel=False, mondrian_forest=False, weights_from_lifetime=None):", "event = tuple (time, tree, feature, dim, loc), where feature is the index", "1) error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test) list_forest_error_train.append(error_train) list_forest_error_test.append(error_test) # update Mondrian", "= {'times': list_times, 'runtimes': list_runtime, 'Z': Z_all, 'feature_from_repetition': np.array(feature_from_repetition)} if mondrian_forest: if 
weights_from_lifetime", "# construct new feature Xd = X_all[feature_data[c], dim] feature_l = (feature_data[c])[Xd <= loc]", ":param X: training inputs :param y: training regression targets :param X_test: test inputs", "= [] active_features = [] active_features_in_tree = [[] for _ in range(M)] for", "errors_regression(y, y_test, y_hat_train, y_hat_test) list_forest_error_train.append(error_train) list_forest_error_test.append(error_test) # update Mondrian kernel predictions if mondrian_kernel:", "cut for each child lX_l = np.min(X_all[feature_l, :], axis=0) uX_l = np.max(X_all[feature_l, :],", "y_mean + np.mean(trees_y_hat_test, 1) error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test) list_forest_error_train.append(error_train) list_forest_error_test.append(error_test)", "if weights_from_lifetime is not None and birth_time <= weights_from_lifetime: w_kernel_save = np.array(w_kernel[active_features]) y_hat_train", "used to sample first cut in each tree feature_data = [np.array(range(N_all)) for _", "birth_time <= weights_from_lifetime: w_trees[m] = w_tree / np.sqrt(M) trees_y_hat_train[:, m] = np.squeeze(Z_train.dot(w_tree)) trees_y_hat_test[:,", "training regression targets :param X_test: test inputs :param y_test: test regression targets :param", "lifetime_max: heapq.heappush(events, (cut_time_l, m, C + 0, dim_l, loc_l)) if cut_time_r < lifetime_max:", "means y_mean = np.mean(y) y_train = y - y_mean # start timer time_start", "import time from utils import sample_cut, errors_regression def evaluate_all_lifetimes(X, y, X_test, y_test, M,", "(cut_time_r, m, C + 1, dim_r, loc_r)) feature_from_repetition.append(m) feature_from_repetition.append(m) C += 2 if", "sample_cut(lX_r, uX_r, birth_time) # add new cuts to heap if cut_time_l < lifetime_max:", "= y_mean + Z_test.dot(w_kernel) if validation: error_train, error_validation =\\ errors_regression(y, y_test[:(N_test/2)], y_hat_train, y_hat_test[:(N_test/2)])", "indicating whether mondrian 
kernel should be evaluated :param mondrian_forest: flag indicating whether mondrian", "active_features_in_tree[m]] Z_test = Z_all[N:, active_features_in_tree[m]] w_tree = np.linalg.solve(np.transpose(Z_train).dot(Z_train) + delta / M *", "w_tree = np.linalg.solve(np.transpose(Z_train).dot(Z_train) + delta / M * np.identity(len(active_features_in_tree[m])), np.transpose(Z_train).dot(y_train)) if weights_from_lifetime is", "# event = tuple (time, tree, feature, dim, loc), where feature is the", "y_mean # start timer time_start = time.clock() # initialize sparse feature matrix indptr", ":], axis=0) cut_time_r, dim_r, loc_r = sample_cut(lX_r, uX_r, birth_time) # add new cuts", "(1) construct a Mondrian feature map with lifetime lifetime_max, to (2) find a", "(birth_time, lifetime_max, C, error_test)) sys.stdout.flush() if mondrian_kernel: sys.stdout.write(\"\\n\") # this function returns a", "indicating whether mondrian forest should be evaluated :param weights_from_lifetime: lifetime at which forest", "= np.max(X_all[feature_l, :], axis=0) cut_time_l, dim_l, loc_l = sample_cut(lX_l, uX_l, birth_time) lX_r =", "weights_from_lifetime is not None: results['w_forest'] = np.concatenate(w_trees) results['w_kernel'] = w_kernel_save results['forest_train'] = list_forest_error_train", "in range(M)] trees_y_hat_train = np.zeros((N, M)) # initialize Mondrian tree predictions and squared", "= y_mean + np.mean(trees_y_hat_test, 1) error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test) list_forest_error_train.append(error_train)", "cut_time_l, dim_l, loc_l = sample_cut(lX_l, uX_l, birth_time) lX_r = np.min(X_all[feature_r, :], axis=0) uX_r", "time_start) if mondrian_kernel: # progress indicator in console sys.stdout.write(\"\\rTime: %.2E / %.2E (C", "_ in range(M)] trees_y_hat_train = np.zeros((N, M)) # initialize Mondrian tree predictions and", "= y_mean + Z_train.dot(w_kernel) y_hat_test = y_mean + Z_test.dot(w_kernel) if validation: error_train, 
error_validation", ":param delta: ridge regression regularization hyperparameter :param validation: flag indicating whether a validation", "X_test]) N_all = N + N_test if mondrian_forest or mondrian_kernel: y = np.squeeze(y)", "import sys import time from utils import sample_cut, errors_regression def evaluate_all_lifetimes(X, y, X_test,", "clf.fit(Z_train, y_train, coef_init=w_kernel) w_kernel = clf.coef_ if weights_from_lifetime is not None and birth_time", "regression targets :param M: number of Mondrian trees :param lifetime_max: terminal lifetime :param", "trees_y_hat_train[:, m] = np.squeeze(Z_train.dot(w_tree)) trees_y_hat_test[:, m] = np.squeeze(Z_test.dot(w_tree)) # update Mondrian forest error", "np.mean(trees_y_hat_test, 1) error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test) list_forest_error_train.append(error_train) list_forest_error_test.append(error_test) # update", "(2) find a suitable lifetime (inverse kernel width), or to (3) compare Mondrian", "0 Z_all.indices[feature_r * M + m] = C + 1 Z_all = scipy.sparse.csr_matrix((Z_all.data,", "loc), where feature is the index of feature being split events = []", "0) # event = tuple (time, tree, feature, dim, loc), where feature is", "y_test[:(N_test/2)], y_hat_train, y_hat_test[:(N_test/2)]) error_train, error_test =\\ errors_regression(y, y_test[(N_test/2):], y_hat_train, y_hat_test[(N_test/2):]) list_kernel_error_validation.append(error_validation) else: error_train,", "list_runtime, 'Z': Z_all, 'feature_from_repetition': np.array(feature_from_repetition)} if mondrian_forest: if weights_from_lifetime is not None: results['w_forest']", "if validation: error_train, error_validation =\\ errors_regression(y, y_test[:(N_test/2)], y_hat_train, y_hat_test[:(N_test/2)]) error_train, error_test =\\ errors_regression(y,", "list_forest_error_train results['forest_test'] = list_forest_error_test if mondrian_kernel: results['kernel_train'] = list_kernel_error_train results['kernel_test'] = 
list_kernel_error_test if", "m] = C + 0 Z_all.indices[feature_r * M + m] = C +", "\"\"\" Sweeps through Mondrian kernels with all lifetime in [0, lifetime_max]. This can", "(3) compare Mondrian kernel to Mondrian forest across lifetimes. :param X: training inputs", "Z_train = Z_all[:N, active_features_in_tree[m]] Z_test = Z_all[N:, active_features_in_tree[m]] w_tree = np.linalg.solve(np.transpose(Z_train).dot(Z_train) + delta", "Z_test = Z_all[N:] SGD_epochs = 1 clf = linear_model.SGDRegressor(alpha=delta, fit_intercept=False, n_iter=SGD_epochs) clf.fit(Z_train, y_train,", "res containing all results \"\"\" N, D = np.shape(X) N_test = np.shape(X_test)[0] X_all", "uX_r = np.max(X_all[feature_r, :], axis=0) cut_time_r, dim_r, loc_r = sample_cut(lX_r, uX_r, birth_time) #", "= tuple (time, tree, feature, dim, loc), where feature is the index of", ":], axis=0) uX_l = np.max(X_all[feature_l, :], axis=0) cut_time_l, dim_l, loc_l = sample_cut(lX_l, uX_l,", "m, C + 1, dim_r, loc_r)) feature_from_repetition.append(m) feature_from_repetition.append(m) C += 2 if mondrian_forest:", "whether a validation set should be created by halving the test set :param", "console sys.stdout.write(\"\\rTime: %.2E / %.2E (C = %d, test error = %.3f)\" %", "M): res = evaluate_all_lifetimes(X, None, np.empty((0, X.shape[1])), None, M, lifetime, None) Z =", "= Z_all[:N, active_features_in_tree[m]] Z_test = Z_all[N:, active_features_in_tree[m]] w_tree = np.linalg.solve(np.transpose(Z_train).dot(Z_train) + delta /", "uX = np.max(X_all, 0) # event = tuple (time, tree, feature, dim, loc),", "axis=0) cut_time_l, dim_l, loc_l = sample_cut(lX_l, uX_l, birth_time) lX_r = np.min(X_all[feature_r, :], axis=0)", "# start timer time_start = time.clock() # initialize sparse feature matrix indptr =", "y_mean + Z_train.dot(w_kernel) y_hat_test = y_mean + Z_test.dot(w_kernel) if validation: error_train, error_validation =\\", "y_test, M, lifetime_max, delta, validation=False, mondrian_kernel=False, 
mondrian_forest=False, weights_from_lifetime=None): \"\"\" Sweeps through Mondrian kernels", "[0, lifetime_max]. This can be used to (1) construct a Mondrian feature map", "Mondrian feature map with lifetime lifetime_max, to (2) find a suitable lifetime (inverse", "= np.squeeze(y_test) # subtract target means y_mean = np.mean(y) y_train = y -", "= [] if mondrian_kernel: w_kernel = np.zeros(M) w_kernel_save = np.zeros(M) list_kernel_error_train = []", "np.sqrt(M) trees_y_hat_train[:, m] = np.squeeze(Z_train.dot(w_tree)) trees_y_hat_test[:, m] = np.squeeze(Z_test.dot(w_tree)) # update Mondrian forest", "X: training inputs :param y: training regression targets :param X_test: test inputs :param", "{'times': list_times, 'runtimes': list_runtime, 'Z': Z_all, 'feature_from_repetition': np.array(feature_from_repetition)} if mondrian_forest: if weights_from_lifetime is", "y_test[(N_test/2):], y_hat_train, y_hat_test[(N_test/2):]) list_kernel_error_validation.append(error_validation) else: error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test) list_kernel_error_train.append(error_train)", "weights_from_lifetime is not None and birth_time <= weights_from_lifetime: w_kernel_save = np.array(w_kernel[active_features]) y_hat_train =", "= (feature_data[c])[Xd > loc] feature_data.append(feature_l) feature_data.append(feature_r) active_features.remove(c) active_features_in_tree[m].remove(c) active_features.append(C + 0) active_features.append(C +", "coef_init=w_kernel) w_kernel = clf.coef_ if weights_from_lifetime is not None and birth_time <= weights_from_lifetime:", "w_tree / np.sqrt(M) trees_y_hat_train[:, m] = np.squeeze(Z_train.dot(w_tree)) trees_y_hat_test[:, m] = np.squeeze(Z_test.dot(w_tree)) # update", "= np.linalg.solve(np.transpose(Z_train).dot(Z_train) + delta / M * np.identity(len(active_features_in_tree[m])), np.transpose(Z_train).dot(y_train)) if weights_from_lifetime is not", "feature_data = [np.array(range(N_all)) for _ in range(M)] lX = 
np.min(X_all, 0) uX =", "# add new cuts to heap if cut_time_l < lifetime_max: heapq.heappush(events, (cut_time_l, m,", "C + 0 Z_all.indices[feature_r * M + m] = C + 1 Z_all", "%.2E (C = %d, test error = %.3f)\" % (birth_time, lifetime_max, C, error_test))", "start timer time_start = time.clock() # initialize sparse feature matrix indptr = range(0,", "if mondrian_kernel: results['kernel_train'] = list_kernel_error_train results['kernel_test'] = list_kernel_error_test if validation: results['kernel_validation'] = list_kernel_error_validation", "targets :param M: number of Mondrian trees :param lifetime_max: terminal lifetime :param delta:", "the cut for each child lX_l = np.min(X_all[feature_l, :], axis=0) uX_l = np.max(X_all[feature_l,", "in [0, lifetime_max]. This can be used to (1) construct a Mondrian feature", "w_kernel[c] = 0 Z_train = Z_all[:N] Z_test = Z_all[N:] SGD_epochs = 1 clf", "sys import time from utils import sample_cut, errors_regression def evaluate_all_lifetimes(X, y, X_test, y_test,", "error = %.3f)\" % (birth_time, lifetime_max, C, error_test)) sys.stdout.flush() if mondrian_kernel: sys.stdout.write(\"\\n\") #", "utils import sample_cut, errors_regression def evaluate_all_lifetimes(X, y, X_test, y_test, M, lifetime_max, delta, validation=False,", "to heap if cut_time_l < lifetime_max: heapq.heappush(events, (cut_time_l, m, C + 0, dim_l,", "not None and birth_time <= weights_from_lifetime: w_kernel_save = np.array(w_kernel[active_features]) y_hat_train = y_mean +", "<= weights_from_lifetime: w_kernel_save = np.array(w_kernel[active_features]) y_hat_train = y_mean + Z_train.dot(w_kernel) y_hat_test = y_mean", "for m in range(M): cut_time, dim, loc = sample_cut(lX, uX, 0.0) if cut_time", "timer time_start = time.clock() # initialize sparse feature matrix indptr = range(0, M", "C, error_test)) sys.stdout.flush() if mondrian_kernel: sys.stdout.write(\"\\n\") # this function returns a dictionary with", "lifetime_max, delta, validation=False, 
mondrian_kernel=False, mondrian_forest=False, weights_from_lifetime=None): \"\"\" Sweeps through Mondrian kernels with all", "= sample_cut(lX_r, uX_r, birth_time) # add new cuts to heap if cut_time_l <", "N_all + 1, M) indices = range(M) * N_all data = np.ones(N_all *", "1 clf = linear_model.SGDRegressor(alpha=delta, fit_intercept=False, n_iter=SGD_epochs) clf.fit(Z_train, y_train, coef_init=w_kernel) w_kernel = clf.coef_ if", "list_runtime.append(time.clock() - time_start) if mondrian_kernel: # progress indicator in console sys.stdout.write(\"\\rTime: %.2E /", "results def Mondrian_kernel_features(X, lifetime, M): res = evaluate_all_lifetimes(X, None, np.empty((0, X.shape[1])), None, M,", "to (1) construct a Mondrian feature map with lifetime lifetime_max, to (2) find", "and birth_time <= weights_from_lifetime: w_trees[m] = w_tree / np.sqrt(M) trees_y_hat_train[:, m] = np.squeeze(Z_train.dot(w_tree))", "lifetime in [0, lifetime_max]. This can be used to (1) construct a Mondrian", "=\\ errors_regression(y, y_test[:(N_test/2)], y_hat_train, y_hat_test[:(N_test/2)]) error_train, error_test =\\ errors_regression(y, y_test[(N_test/2):], y_hat_train, y_hat_test[(N_test/2):]) list_kernel_error_validation.append(error_validation)", "progress indicator in console sys.stdout.write(\"\\rTime: %.2E / %.2E (C = %d, test error", "mondrian_kernel=False, mondrian_forest=False, weights_from_lifetime=None): \"\"\" Sweeps through Mondrian kernels with all lifetime in [0,", "cut_time < lifetime_max: heapq.heappush(events, (cut_time, m, m, dim, loc)) active_features.append(m) active_features_in_tree[m].append(m) # iterate", "M)) feature_from_repetition = range(M) C = M # bounding box for all datapoints", "loc] feature_r = (feature_data[c])[Xd > loc] feature_data.append(feature_l) feature_data.append(feature_r) active_features.remove(c) active_features_in_tree[m].remove(c) active_features.append(C + 0)", "sklearn import linear_model import sys import time from utils import sample_cut, 
errors_regression def", "C += 2 if mondrian_forest: # update Mondrian forest predictions in tree m", "list_forest_error_test.append(error_test) # update Mondrian kernel predictions if mondrian_kernel: w_kernel = np.append(w_kernel, [w_kernel[c], w_kernel[c]])", ":param mondrian_kernel: flag indicating whether mondrian kernel should be evaluated :param mondrian_forest: flag", "datapoints used to sample first cut in each tree feature_data = [np.array(range(N_all)) for", ":param lifetime_max: terminal lifetime :param delta: ridge regression regularization hyperparameter :param validation: flag", "being split events = [] active_features = [] active_features_in_tree = [[] for _", "C + 1 Z_all = scipy.sparse.csr_matrix((Z_all.data, Z_all.indices, Z_all.indptr), shape=(N_all, C + 2), copy=False)", "= scipy.sparse.csr_matrix((Z_all.data, Z_all.indices, Z_all.indptr), shape=(N_all, C + 2), copy=False) # sample the cut", "M * np.identity(len(active_features_in_tree[m])), np.transpose(Z_train).dot(y_train)) if weights_from_lifetime is not None and birth_time <= weights_from_lifetime:", "+ Z_test.dot(w_kernel) if validation: error_train, error_validation =\\ errors_regression(y, y_test[:(N_test/2)], y_hat_train, y_hat_test[:(N_test/2)]) error_train, error_test", "trees_y_hat_test = np.zeros((N_test, M)) list_forest_error_train = [] list_forest_error_test = [] if mondrian_kernel: w_kernel", "y_mean + Z_test.dot(w_kernel) if validation: error_train, error_validation =\\ errors_regression(y, y_test[:(N_test/2)], y_hat_train, y_hat_test[:(N_test/2)]) error_train,", "= np.shape(X_test)[0] X_all = np.array(np.r_[X, X_test]) N_all = N + N_test if mondrian_forest", "(time, tree, feature, dim, loc), where feature is the index of feature being", "= np.shape(X) N_test = np.shape(X_test)[0] X_all = np.array(np.r_[X, X_test]) N_all = N +", "lX = np.min(X_all, 0) uX = np.max(X_all, 0) # event = tuple (time,", "= w_tree / np.sqrt(M) trees_y_hat_train[:, m] = np.squeeze(Z_train.dot(w_tree)) 
trees_y_hat_test[:, m] = np.squeeze(Z_test.dot(w_tree)) #", "= errors_regression(y, y_test, y_hat_train, y_hat_test) list_kernel_error_train.append(error_train) list_kernel_error_test.append(error_test) # save runtime list_runtime.append(time.clock() - time_start)", "lifetime, M): res = evaluate_all_lifetimes(X, None, np.empty((0, X.shape[1])), None, M, lifetime, None) Z", "matrix indptr = range(0, M * N_all + 1, M) indices = range(M)", "np.transpose(Z_train).dot(y_train)) if weights_from_lifetime is not None and birth_time <= weights_from_lifetime: w_trees[m] = w_tree", "of feature being split events = [] active_features = [] active_features_in_tree = [[]", "new feature Xd = X_all[feature_data[c], dim] feature_l = (feature_data[c])[Xd <= loc] feature_r =", "= heapq.heappop(events) list_times.append(birth_time) # construct new feature Xd = X_all[feature_data[c], dim] feature_l =", "= [] list_forest_error_test = [] if mondrian_kernel: w_kernel = np.zeros(M) w_kernel_save = np.zeros(M)", "birth times in increasing order list_times = [] list_runtime = [] if mondrian_forest:", "+ 1) active_features_in_tree[m].append(C + 0) active_features_in_tree[m].append(C + 1) # move datapoints from split", "feature_l = (feature_data[c])[Xd <= loc] feature_r = (feature_data[c])[Xd > loc] feature_data.append(feature_l) feature_data.append(feature_r) active_features.remove(c)", "dim, loc)) active_features.append(m) active_features_in_tree[m].append(m) # iterate through birth times in increasing order list_times", "active_features_in_tree = [[] for _ in range(M)] for m in range(M): cut_time, dim,", "= list_kernel_error_validation return results def Mondrian_kernel_features(X, lifetime, M): res = evaluate_all_lifetimes(X, None, np.empty((0,", "for all datapoints used to sample first cut in each tree feature_data =", "X_test: test inputs :param y_test: test regression targets :param M: number of Mondrian", "scipy.sparse.csr_matrix((Z_all.data, Z_all.indices, Z_all.indptr), shape=(N_all, 
C + 2), copy=False) # sample the cut for", "None, M, lifetime, None) Z = np.sqrt(M) * res['Z'] # undo normalization return", "Sweeps through Mondrian kernels with all lifetime in [0, lifetime_max]. This can be", "M # bounding box for all datapoints used to sample first cut in", "= (feature_data[c])[Xd <= loc] feature_r = (feature_data[c])[Xd > loc] feature_data.append(feature_l) feature_data.append(feature_r) active_features.remove(c) active_features_in_tree[m].remove(c)", "events = [] active_features = [] active_features_in_tree = [[] for _ in range(M)]", "np.squeeze(y_test) # subtract target means y_mean = np.mean(y) y_train = y - y_mean", "delta: ridge regression regularization hyperparameter :param validation: flag indicating whether a validation set", ":param X_test: test inputs :param y_test: test regression targets :param M: number of", "(C = %d, test error = %.3f)\" % (birth_time, lifetime_max, C, error_test)) sys.stdout.flush()", "_ in range(M)] for m in range(M): cut_time, dim, loc = sample_cut(lX, uX,", "dim, loc) = heapq.heappop(events) list_times.append(birth_time) # construct new feature Xd = X_all[feature_data[c], dim]", "a Mondrian feature map with lifetime lifetime_max, to (2) find a suitable lifetime", "Mondrian forest predictions in tree m Z_train = Z_all[:N, active_features_in_tree[m]] Z_test = Z_all[N:,", "N + N_test if mondrian_forest or mondrian_kernel: y = np.squeeze(y) y_test = np.squeeze(y_test)", "cut_time_l < lifetime_max: heapq.heappush(events, (cut_time_l, m, C + 0, dim_l, loc_l)) if cut_time_r", "heapq.heappush(events, (cut_time_l, m, C + 0, dim_l, loc_l)) if cut_time_r < lifetime_max: heapq.heappush(events,", "np.squeeze(Z_train.dot(w_tree)) trees_y_hat_test[:, m] = np.squeeze(Z_test.dot(w_tree)) # update Mondrian forest error y_hat_train = y_mean", "= 1 clf = linear_model.SGDRegressor(alpha=delta, fit_intercept=False, n_iter=SGD_epochs) clf.fit(Z_train, y_train, coef_init=w_kernel) w_kernel = clf.coef_", "(cut_time, m, m, dim, loc)) 
active_features.append(m) active_features_in_tree[m].append(m) # iterate through birth times in", "can be used to (1) construct a Mondrian feature map with lifetime lifetime_max,", "2), copy=False) # sample the cut for each child lX_l = np.min(X_all[feature_l, :],", "np.identity(len(active_features_in_tree[m])), np.transpose(Z_train).dot(y_train)) if weights_from_lifetime is not None and birth_time <= weights_from_lifetime: w_trees[m] =", "where feature is the index of feature being split events = [] active_features", "loc)) active_features.append(m) active_features_in_tree[m].append(m) # iterate through birth times in increasing order list_times =", "M)) list_forest_error_train = [] list_forest_error_test = [] if mondrian_kernel: w_kernel = np.zeros(M) w_kernel_save", "[[] for _ in range(M)] for m in range(M): cut_time, dim, loc =", "regression targets :param X_test: test inputs :param y_test: test regression targets :param M:", "1 Z_all = scipy.sparse.csr_matrix((Z_all.data, Z_all.indices, Z_all.indptr), shape=(N_all, C + 2), copy=False) # sample", "C + 0, dim_l, loc_l)) if cut_time_r < lifetime_max: heapq.heappush(events, (cut_time_r, m, C", "+ 0, dim_l, loc_l)) if cut_time_r < lifetime_max: heapq.heappush(events, (cut_time_r, m, C +", "order list_times = [] list_runtime = [] if mondrian_forest: w_trees = [np.zeros(1) for", "np import scipy.sparse from sklearn import linear_model import sys import time from utils", "m Z_train = Z_all[:N, active_features_in_tree[m]] Z_test = Z_all[N:, active_features_in_tree[m]] w_tree = np.linalg.solve(np.transpose(Z_train).dot(Z_train) +", "terminal lifetime :param delta: ridge regression regularization hyperparameter :param validation: flag indicating whether", "values of interest stored in it results = {'times': list_times, 'runtimes': list_runtime, 'Z':", "forest and kernel learned weights should be saved :return: dictionary res containing all", "range(M) * N_all data = np.ones(N_all * M) / np.sqrt(M) Z_all = 
scipy.sparse.csr_matrix((data,", "mondrian forest should be evaluated :param weights_from_lifetime: lifetime at which forest and kernel", "list_kernel_error_train = [] if validation: list_kernel_error_validation = [] list_kernel_error_test = [] while len(events)", "list_kernel_error_test.append(error_test) # save runtime list_runtime.append(time.clock() - time_start) if mondrian_kernel: # progress indicator in", "results['w_forest'] = np.concatenate(w_trees) results['w_kernel'] = w_kernel_save results['forest_train'] = list_forest_error_train results['forest_test'] = list_forest_error_test if", "mondrian_kernel: flag indicating whether mondrian kernel should be evaluated :param mondrian_forest: flag indicating", "cut_time_r < lifetime_max: heapq.heappush(events, (cut_time_r, m, C + 1, dim_r, loc_r)) feature_from_repetition.append(m) feature_from_repetition.append(m)", "mondrian_forest: flag indicating whether mondrian forest should be evaluated :param weights_from_lifetime: lifetime at", "dim_l, loc_l)) if cut_time_r < lifetime_max: heapq.heappush(events, (cut_time_r, m, C + 1, dim_r,", "= [np.array(range(N_all)) for _ in range(M)] lX = np.min(X_all, 0) uX = np.max(X_all,", "regression regularization hyperparameter :param validation: flag indicating whether a validation set should be", "* np.identity(len(active_features_in_tree[m])), np.transpose(Z_train).dot(y_train)) if weights_from_lifetime is not None and birth_time <= weights_from_lifetime: w_trees[m]", "scipy.sparse.csr_matrix((data, indices, indptr), shape=(N_all, M)) feature_from_repetition = range(M) C = M # bounding", "= np.max(X_all[feature_r, :], axis=0) cut_time_r, dim_r, loc_r = sample_cut(lX_r, uX_r, birth_time) # add", "y_hat_train, y_hat_test) list_kernel_error_train.append(error_train) list_kernel_error_test.append(error_test) # save runtime list_runtime.append(time.clock() - time_start) if mondrian_kernel: #", "weights_from_lifetime: w_kernel_save = np.array(w_kernel[active_features]) y_hat_train 
= y_mean + Z_train.dot(w_kernel) y_hat_test = y_mean +", "y_hat_train, y_hat_test) list_forest_error_train.append(error_train) list_forest_error_test.append(error_test) # update Mondrian kernel predictions if mondrian_kernel: w_kernel =", "np.shape(X) N_test = np.shape(X_test)[0] X_all = np.array(np.r_[X, X_test]) N_all = N + N_test", "= Z_all[:N] Z_test = Z_all[N:] SGD_epochs = 1 clf = linear_model.SGDRegressor(alpha=delta, fit_intercept=False, n_iter=SGD_epochs)", "= time.clock() # initialize sparse feature matrix indptr = range(0, M * N_all", "np.array(feature_from_repetition)} if mondrian_forest: if weights_from_lifetime is not None: results['w_forest'] = np.concatenate(w_trees) results['w_kernel'] =", "0: (birth_time, m, c, dim, loc) = heapq.heappop(events) list_times.append(birth_time) # construct new feature", "= %.3f)\" % (birth_time, lifetime_max, C, error_test)) sys.stdout.flush() if mondrian_kernel: sys.stdout.write(\"\\n\") # this", "= np.min(X_all, 0) uX = np.max(X_all, 0) # event = tuple (time, tree,", "by halving the test set :param mondrian_kernel: flag indicating whether mondrian kernel should", "list_kernel_error_test if validation: results['kernel_validation'] = list_kernel_error_validation return results def Mondrian_kernel_features(X, lifetime, M): res", "uX_l = np.max(X_all[feature_l, :], axis=0) cut_time_l, dim_l, loc_l = sample_cut(lX_l, uX_l, birth_time) lX_r", "heapq.heappop(events) list_times.append(birth_time) # construct new feature Xd = X_all[feature_data[c], dim] feature_l = (feature_data[c])[Xd", "= np.zeros((N_test, M)) list_forest_error_train = [] list_forest_error_test = [] if mondrian_kernel: w_kernel =", "+ m] = C + 0 Z_all.indices[feature_r * M + m] = C", "y_hat_test[:(N_test/2)]) error_train, error_test =\\ errors_regression(y, y_test[(N_test/2):], y_hat_train, y_hat_test[(N_test/2):]) list_kernel_error_validation.append(error_validation) else: error_train, error_test =", "feature_data.append(feature_r) 
active_features.remove(c) active_features_in_tree[m].remove(c) active_features.append(C + 0) active_features.append(C + 1) active_features_in_tree[m].append(C + 0) active_features_in_tree[m].append(C", "list_kernel_error_train results['kernel_test'] = list_kernel_error_test if validation: results['kernel_validation'] = list_kernel_error_validation return results def Mondrian_kernel_features(X,", "list_forest_error_test = [] if mondrian_kernel: w_kernel = np.zeros(M) w_kernel_save = np.zeros(M) list_kernel_error_train =", "mondrian kernel should be evaluated :param mondrian_forest: flag indicating whether mondrian forest should", "kernel learned weights should be saved :return: dictionary res containing all results \"\"\"", "sys.stdout.flush() if mondrian_kernel: sys.stdout.write(\"\\n\") # this function returns a dictionary with all values", "%.2E / %.2E (C = %d, test error = %.3f)\" % (birth_time, lifetime_max,", "= [[] for _ in range(M)] for m in range(M): cut_time, dim, loc", "should be evaluated :param mondrian_forest: flag indicating whether mondrian forest should be evaluated", "= np.append(w_kernel, [w_kernel[c], w_kernel[c]]) w_kernel[c] = 0 Z_train = Z_all[:N] Z_test = Z_all[N:]", "1, dim_r, loc_r)) feature_from_repetition.append(m) feature_from_repetition.append(m) C += 2 if mondrian_forest: # update Mondrian", "Z_all = scipy.sparse.csr_matrix((Z_all.data, Z_all.indices, Z_all.indptr), shape=(N_all, C + 2), copy=False) # sample the", "m, m, dim, loc)) active_features.append(m) active_features_in_tree[m].append(m) # iterate through birth times in increasing", "0 Z_train = Z_all[:N] Z_test = Z_all[N:] SGD_epochs = 1 clf = linear_model.SGDRegressor(alpha=delta,", "numpy as np import scipy.sparse from sklearn import linear_model import sys import time", "= list_forest_error_test if mondrian_kernel: results['kernel_train'] = list_kernel_error_train results['kernel_test'] = list_kernel_error_test if validation: results['kernel_validation']", "be evaluated 
:param weights_from_lifetime: lifetime at which forest and kernel learned weights should", "None: results['w_forest'] = np.concatenate(w_trees) results['w_kernel'] = w_kernel_save results['forest_train'] = list_forest_error_train results['forest_test'] = list_forest_error_test", "heapq.heappush(events, (cut_time, m, m, dim, loc)) active_features.append(m) active_features_in_tree[m].append(m) # iterate through birth times", "or mondrian_kernel: y = np.squeeze(y) y_test = np.squeeze(y_test) # subtract target means y_mean", "m] = C + 1 Z_all = scipy.sparse.csr_matrix((Z_all.data, Z_all.indices, Z_all.indptr), shape=(N_all, C +", "+ 1, dim_r, loc_r)) feature_from_repetition.append(m) feature_from_repetition.append(m) C += 2 if mondrian_forest: # update", "list_times, 'runtimes': list_runtime, 'Z': Z_all, 'feature_from_repetition': np.array(feature_from_repetition)} if mondrian_forest: if weights_from_lifetime is not", "indicating whether a validation set should be created by halving the test set", "from sklearn import linear_model import sys import time from utils import sample_cut, errors_regression", "y, X_test, y_test, M, lifetime_max, delta, validation=False, mondrian_kernel=False, mondrian_forest=False, weights_from_lifetime=None): \"\"\" Sweeps through", "y_test, y_hat_train, y_hat_test) list_forest_error_train.append(error_train) list_forest_error_test.append(error_test) # update Mondrian kernel predictions if mondrian_kernel: w_kernel", "to Mondrian forest across lifetimes. 
:param X: training inputs :param y: training regression", "this function returns a dictionary with all values of interest stored in it", "results['forest_test'] = list_forest_error_test if mondrian_kernel: results['kernel_train'] = list_kernel_error_train results['kernel_test'] = list_kernel_error_test if validation:", "c, dim, loc) = heapq.heappop(events) list_times.append(birth_time) # construct new feature Xd = X_all[feature_data[c],", "np.sqrt(M) Z_all = scipy.sparse.csr_matrix((data, indices, indptr), shape=(N_all, M)) feature_from_repetition = range(M) C =", "y_mean = np.mean(y) y_train = y - y_mean # start timer time_start =", "uX, 0.0) if cut_time < lifetime_max: heapq.heappush(events, (cut_time, m, m, dim, loc)) active_features.append(m)", "lifetime, None) Z = np.sqrt(M) * res['Z'] # undo normalization return Z, res['feature_from_repetition']", "target means y_mean = np.mean(y) y_train = y - y_mean # start timer", "function returns a dictionary with all values of interest stored in it results", ":return: dictionary res containing all results \"\"\" N, D = np.shape(X) N_test =", "initialize sparse feature matrix indptr = range(0, M * N_all + 1, M)", "across lifetimes. 
:param X: training inputs :param y: training regression targets :param X_test:", "X.shape[1])), None, M, lifetime, None) Z = np.sqrt(M) * res['Z'] # undo normalization", "m, c, dim, loc) = heapq.heappop(events) list_times.append(birth_time) # construct new feature Xd =", "# progress indicator in console sys.stdout.write(\"\\rTime: %.2E / %.2E (C = %d, test", "halving the test set :param mondrian_kernel: flag indicating whether mondrian kernel should be", "evaluated :param mondrian_forest: flag indicating whether mondrian forest should be evaluated :param weights_from_lifetime:", "predictions in tree m Z_train = Z_all[:N, active_features_in_tree[m]] Z_test = Z_all[N:, active_features_in_tree[m]] w_tree", "= np.max(X_all, 0) # event = tuple (time, tree, feature, dim, loc), where", "Mondrian trees :param lifetime_max: terminal lifetime :param delta: ridge regression regularization hyperparameter :param", "kernel should be evaluated :param mondrian_forest: flag indicating whether mondrian forest should be", "= np.concatenate(w_trees) results['w_kernel'] = w_kernel_save results['forest_train'] = list_forest_error_train results['forest_test'] = list_forest_error_test if mondrian_kernel:", "def evaluate_all_lifetimes(X, y, X_test, y_test, M, lifetime_max, delta, validation=False, mondrian_kernel=False, mondrian_forest=False, weights_from_lifetime=None): \"\"\"", "<reponame>matejbalog/mondrian-kernel import heapq import numpy as np import scipy.sparse from sklearn import linear_model", "all datapoints used to sample first cut in each tree feature_data = [np.array(range(N_all))", "np.max(X_all, 0) # event = tuple (time, tree, feature, dim, loc), where feature", "# save runtime list_runtime.append(time.clock() - time_start) if mondrian_kernel: # progress indicator in console", "= range(0, M * N_all + 1, M) indices = range(M) * N_all", "= %d, test error = %.3f)\" % (birth_time, lifetime_max, C, error_test)) sys.stdout.flush() if", "np.array(np.r_[X, X_test]) N_all = N + 
N_test if mondrian_forest or mondrian_kernel: y =", "data = np.ones(N_all * M) / np.sqrt(M) Z_all = scipy.sparse.csr_matrix((data, indices, indptr), shape=(N_all,", "lifetime (inverse kernel width), or to (3) compare Mondrian kernel to Mondrian forest", "= [np.zeros(1) for _ in range(M)] trees_y_hat_train = np.zeros((N, M)) # initialize Mondrian", "indptr), shape=(N_all, M)) feature_from_repetition = range(M) C = M # bounding box for", "N_test = np.shape(X_test)[0] X_all = np.array(np.r_[X, X_test]) N_all = N + N_test if", "Z_all.indices[feature_l * M + m] = C + 0 Z_all.indices[feature_r * M +", "np.min(X_all[feature_l, :], axis=0) uX_l = np.max(X_all[feature_l, :], axis=0) cut_time_l, dim_l, loc_l = sample_cut(lX_l,", "# bounding box for all datapoints used to sample first cut in each", "predictions if mondrian_kernel: w_kernel = np.append(w_kernel, [w_kernel[c], w_kernel[c]]) w_kernel[c] = 0 Z_train =", "delta, validation=False, mondrian_kernel=False, mondrian_forest=False, weights_from_lifetime=None): \"\"\" Sweeps through Mondrian kernels with all lifetime", "range(0, M * N_all + 1, M) indices = range(M) * N_all data", "lifetime lifetime_max, to (2) find a suitable lifetime (inverse kernel width), or to", "suitable lifetime (inverse kernel width), or to (3) compare Mondrian kernel to Mondrian", "Mondrian kernel to Mondrian forest across lifetimes. 
:param X: training inputs :param y:", "axis=0) uX_r = np.max(X_all[feature_r, :], axis=0) cut_time_r, dim_r, loc_r = sample_cut(lX_r, uX_r, birth_time)", "/ M * np.identity(len(active_features_in_tree[m])), np.transpose(Z_train).dot(y_train)) if weights_from_lifetime is not None and birth_time <=", "weights_from_lifetime: w_trees[m] = w_tree / np.sqrt(M) trees_y_hat_train[:, m] = np.squeeze(Z_train.dot(w_tree)) trees_y_hat_test[:, m] =", "+ 2), copy=False) # sample the cut for each child lX_l = np.min(X_all[feature_l,", "features Z_all.indices[feature_l * M + m] = C + 0 Z_all.indices[feature_r * M", "M: number of Mondrian trees :param lifetime_max: terminal lifetime :param delta: ridge regression", "loc] feature_data.append(feature_l) feature_data.append(feature_r) active_features.remove(c) active_features_in_tree[m].remove(c) active_features.append(C + 0) active_features.append(C + 1) active_features_in_tree[m].append(C +", "import linear_model import sys import time from utils import sample_cut, errors_regression def evaluate_all_lifetimes(X,", "uX_r, birth_time) # add new cuts to heap if cut_time_l < lifetime_max: heapq.heappush(events,", "range(M) C = M # bounding box for all datapoints used to sample", "(feature_data[c])[Xd <= loc] feature_r = (feature_data[c])[Xd > loc] feature_data.append(feature_l) feature_data.append(feature_r) active_features.remove(c) active_features_in_tree[m].remove(c) active_features.append(C", "results['kernel_validation'] = list_kernel_error_validation return results def Mondrian_kernel_features(X, lifetime, M): res = evaluate_all_lifetimes(X, None,", "for _ in range(M)] lX = np.min(X_all, 0) uX = np.max(X_all, 0) #", "1) active_features_in_tree[m].append(C + 0) active_features_in_tree[m].append(C + 1) # move datapoints from split feature", "M) indices = range(M) * N_all data = np.ones(N_all * M) / np.sqrt(M)", "update Mondrian forest predictions in tree m Z_train = Z_all[:N, active_features_in_tree[m]] Z_test =", 
"np.array(w_kernel[active_features]) y_hat_train = y_mean + Z_train.dot(w_kernel) y_hat_test = y_mean + Z_test.dot(w_kernel) if validation:", "# sample the cut for each child lX_l = np.min(X_all[feature_l, :], axis=0) uX_l", "range(M)] trees_y_hat_train = np.zeros((N, M)) # initialize Mondrian tree predictions and squared errors", "# update Mondrian forest error y_hat_train = y_mean + np.mean(trees_y_hat_train, 1) y_hat_test =", "find a suitable lifetime (inverse kernel width), or to (3) compare Mondrian kernel", "targets :param X_test: test inputs :param y_test: test regression targets :param M: number", "ridge regression regularization hyperparameter :param validation: flag indicating whether a validation set should", "Z_all.indptr), shape=(N_all, C + 2), copy=False) # sample the cut for each child", "linear_model.SGDRegressor(alpha=delta, fit_intercept=False, n_iter=SGD_epochs) clf.fit(Z_train, y_train, coef_init=w_kernel) w_kernel = clf.coef_ if weights_from_lifetime is not", "error_validation =\\ errors_regression(y, y_test[:(N_test/2)], y_hat_train, y_hat_test[:(N_test/2)]) error_train, error_test =\\ errors_regression(y, y_test[(N_test/2):], y_hat_train, y_hat_test[(N_test/2):])", "for _ in range(M)] trees_y_hat_train = np.zeros((N, M)) # initialize Mondrian tree predictions", "feature to child features Z_all.indices[feature_l * M + m] = C + 0", "evaluated :param weights_from_lifetime: lifetime at which forest and kernel learned weights should be", "= y - y_mean # start timer time_start = time.clock() # initialize sparse", "[] active_features = [] active_features_in_tree = [[] for _ in range(M)] for m", "np.zeros((N_test, M)) list_forest_error_train = [] list_forest_error_test = [] if mondrian_kernel: w_kernel = np.zeros(M)", "be created by halving the test set :param mondrian_kernel: flag indicating whether mondrian", "np.mean(trees_y_hat_train, 1) y_hat_test = y_mean + np.mean(trees_y_hat_test, 1) error_train, error_test = errors_regression(y, y_test,", 
"lifetime at which forest and kernel learned weights should be saved :return: dictionary", "/ np.sqrt(M) trees_y_hat_train[:, m] = np.squeeze(Z_train.dot(w_tree)) trees_y_hat_test[:, m] = np.squeeze(Z_test.dot(w_tree)) # update Mondrian", "in console sys.stdout.write(\"\\rTime: %.2E / %.2E (C = %d, test error = %.3f)\"", "while len(events) > 0: (birth_time, m, c, dim, loc) = heapq.heappop(events) list_times.append(birth_time) #", "inputs :param y_test: test regression targets :param M: number of Mondrian trees :param", "y_hat_test) list_forest_error_train.append(error_train) list_forest_error_test.append(error_test) # update Mondrian kernel predictions if mondrian_kernel: w_kernel = np.append(w_kernel,", "list_kernel_error_train.append(error_train) list_kernel_error_test.append(error_test) # save runtime list_runtime.append(time.clock() - time_start) if mondrian_kernel: # progress indicator", "if mondrian_forest or mondrian_kernel: y = np.squeeze(y) y_test = np.squeeze(y_test) # subtract target", "+ m] = C + 1 Z_all = scipy.sparse.csr_matrix((Z_all.data, Z_all.indices, Z_all.indptr), shape=(N_all, C", ":param y_test: test regression targets :param M: number of Mondrian trees :param lifetime_max:", "runtime list_runtime.append(time.clock() - time_start) if mondrian_kernel: # progress indicator in console sys.stdout.write(\"\\rTime: %.2E", "time.clock() # initialize sparse feature matrix indptr = range(0, M * N_all +", "return results def Mondrian_kernel_features(X, lifetime, M): res = evaluate_all_lifetimes(X, None, np.empty((0, X.shape[1])), None,", "not None and birth_time <= weights_from_lifetime: w_trees[m] = w_tree / np.sqrt(M) trees_y_hat_train[:, m]", "= Z_all[N:, active_features_in_tree[m]] w_tree = np.linalg.solve(np.transpose(Z_train).dot(Z_train) + delta / M * np.identity(len(active_features_in_tree[m])), np.transpose(Z_train).dot(y_train))", "Z_all[N:, active_features_in_tree[m]] w_tree = np.linalg.solve(np.transpose(Z_train).dot(Z_train) + delta / M * 
np.identity(len(active_features_in_tree[m])), np.transpose(Z_train).dot(y_train)) if", "tree predictions and squared errors trees_y_hat_test = np.zeros((N_test, M)) list_forest_error_train = [] list_forest_error_test", "list_forest_error_train.append(error_train) list_forest_error_test.append(error_test) # update Mondrian kernel predictions if mondrian_kernel: w_kernel = np.append(w_kernel, [w_kernel[c],", "2 if mondrian_forest: # update Mondrian forest predictions in tree m Z_train =", "y_hat_test = y_mean + Z_test.dot(w_kernel) if validation: error_train, error_validation =\\ errors_regression(y, y_test[:(N_test/2)], y_hat_train,", "+ 0) active_features.append(C + 1) active_features_in_tree[m].append(C + 0) active_features_in_tree[m].append(C + 1) # move", "a dictionary with all values of interest stored in it results = {'times':", "0.0) if cut_time < lifetime_max: heapq.heappush(events, (cut_time, m, m, dim, loc)) active_features.append(m) active_features_in_tree[m].append(m)", "used to (1) construct a Mondrian feature map with lifetime lifetime_max, to (2)", "if weights_from_lifetime is not None and birth_time <= weights_from_lifetime: w_trees[m] = w_tree /", "validation: list_kernel_error_validation = [] list_kernel_error_test = [] while len(events) > 0: (birth_time, m,", "Z_all = scipy.sparse.csr_matrix((data, indices, indptr), shape=(N_all, M)) feature_from_repetition = range(M) C = M", "* M) / np.sqrt(M) Z_all = scipy.sparse.csr_matrix((data, indices, indptr), shape=(N_all, M)) feature_from_repetition =", "= np.zeros((N, M)) # initialize Mondrian tree predictions and squared errors trees_y_hat_test =", "w_kernel = clf.coef_ if weights_from_lifetime is not None and birth_time <= weights_from_lifetime: w_kernel_save", "validation: flag indicating whether a validation set should be created by halving the", "lifetime_max: heapq.heappush(events, (cut_time, m, m, dim, loc)) active_features.append(m) active_features_in_tree[m].append(m) # iterate through birth", 
"= Z_all[N:] SGD_epochs = 1 clf = linear_model.SGDRegressor(alpha=delta, fit_intercept=False, n_iter=SGD_epochs) clf.fit(Z_train, y_train, coef_init=w_kernel)", "clf.coef_ if weights_from_lifetime is not None and birth_time <= weights_from_lifetime: w_kernel_save = np.array(w_kernel[active_features])", "subtract target means y_mean = np.mean(y) y_train = y - y_mean # start", "Mondrian tree predictions and squared errors trees_y_hat_test = np.zeros((N_test, M)) list_forest_error_train = []", "feature, dim, loc), where feature is the index of feature being split events", "mondrian_forest=False, weights_from_lifetime=None): \"\"\" Sweeps through Mondrian kernels with all lifetime in [0, lifetime_max].", "M + m] = C + 1 Z_all = scipy.sparse.csr_matrix((Z_all.data, Z_all.indices, Z_all.indptr), shape=(N_all,", "= np.squeeze(Z_test.dot(w_tree)) # update Mondrian forest error y_hat_train = y_mean + np.mean(trees_y_hat_train, 1)", "np.append(w_kernel, [w_kernel[c], w_kernel[c]]) w_kernel[c] = 0 Z_train = Z_all[:N] Z_test = Z_all[N:] SGD_epochs", "a suitable lifetime (inverse kernel width), or to (3) compare Mondrian kernel to", "feature_from_repetition = range(M) C = M # bounding box for all datapoints used", "< lifetime_max: heapq.heappush(events, (cut_time_l, m, C + 0, dim_l, loc_l)) if cut_time_r <", "feature_from_repetition.append(m) C += 2 if mondrian_forest: # update Mondrian forest predictions in tree", "np.max(X_all[feature_l, :], axis=0) cut_time_l, dim_l, loc_l = sample_cut(lX_l, uX_l, birth_time) lX_r = np.min(X_all[feature_r,", "Mondrian_kernel_features(X, lifetime, M): res = evaluate_all_lifetimes(X, None, np.empty((0, X.shape[1])), None, M, lifetime, None)", "* M + m] = C + 1 Z_all = scipy.sparse.csr_matrix((Z_all.data, Z_all.indices, Z_all.indptr),", "update Mondrian forest error y_hat_train = y_mean + np.mean(trees_y_hat_train, 1) y_hat_test = y_mean", "w_kernel_save results['forest_train'] = list_forest_error_train results['forest_test'] = 
list_forest_error_test if mondrian_kernel: results['kernel_train'] = list_kernel_error_train results['kernel_test']", "cut_time, dim, loc = sample_cut(lX, uX, 0.0) if cut_time < lifetime_max: heapq.heappush(events, (cut_time,", "shape=(N_all, C + 2), copy=False) # sample the cut for each child lX_l", "heap if cut_time_l < lifetime_max: heapq.heappush(events, (cut_time_l, m, C + 0, dim_l, loc_l))", "1, M) indices = range(M) * N_all data = np.ones(N_all * M) /", "add new cuts to heap if cut_time_l < lifetime_max: heapq.heappush(events, (cut_time_l, m, C", "Mondrian forest error y_hat_train = y_mean + np.mean(trees_y_hat_train, 1) y_hat_test = y_mean +", "else: error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test) list_kernel_error_train.append(error_train) list_kernel_error_test.append(error_test) # save runtime", "mondrian_forest: w_trees = [np.zeros(1) for _ in range(M)] trees_y_hat_train = np.zeros((N, M)) #", "cut in each tree feature_data = [np.array(range(N_all)) for _ in range(M)] lX =", "[] if validation: list_kernel_error_validation = [] list_kernel_error_test = [] while len(events) > 0:", "indicator in console sys.stdout.write(\"\\rTime: %.2E / %.2E (C = %d, test error =", "y_train, coef_init=w_kernel) w_kernel = clf.coef_ if weights_from_lifetime is not None and birth_time <=", "error_test)) sys.stdout.flush() if mondrian_kernel: sys.stdout.write(\"\\n\") # this function returns a dictionary with all", "dim_r, loc_r = sample_cut(lX_r, uX_r, birth_time) # add new cuts to heap if", "scipy.sparse from sklearn import linear_model import sys import time from utils import sample_cut,", "error y_hat_train = y_mean + np.mean(trees_y_hat_train, 1) y_hat_test = y_mean + np.mean(trees_y_hat_test, 1)", "y_mean + np.mean(trees_y_hat_train, 1) y_hat_test = y_mean + np.mean(trees_y_hat_test, 1) error_train, error_test =", "y_hat_train = y_mean + np.mean(trees_y_hat_train, 1) y_hat_test = y_mean + np.mean(trees_y_hat_test, 1) error_train,", 
"np.zeros((N, M)) # initialize Mondrian tree predictions and squared errors trees_y_hat_test = np.zeros((N_test,", "<= weights_from_lifetime: w_trees[m] = w_tree / np.sqrt(M) trees_y_hat_train[:, m] = np.squeeze(Z_train.dot(w_tree)) trees_y_hat_test[:, m]", ":param y: training regression targets :param X_test: test inputs :param y_test: test regression", "at which forest and kernel learned weights should be saved :return: dictionary res", "lifetimes. :param X: training inputs :param y: training regression targets :param X_test: test", "construct a Mondrian feature map with lifetime lifetime_max, to (2) find a suitable", "import heapq import numpy as np import scipy.sparse from sklearn import linear_model import", "test set :param mondrian_kernel: flag indicating whether mondrian kernel should be evaluated :param", "M * N_all + 1, M) indices = range(M) * N_all data =", "= np.zeros(M) w_kernel_save = np.zeros(M) list_kernel_error_train = [] if validation: list_kernel_error_validation = []", "Z_all.indices[feature_r * M + m] = C + 1 Z_all = scipy.sparse.csr_matrix((Z_all.data, Z_all.indices,", "y_test: test regression targets :param M: number of Mondrian trees :param lifetime_max: terminal", "lifetime_max: heapq.heappush(events, (cut_time_r, m, C + 1, dim_r, loc_r)) feature_from_repetition.append(m) feature_from_repetition.append(m) C +=", "np.zeros(M) list_kernel_error_train = [] if validation: list_kernel_error_validation = [] list_kernel_error_test = [] while", "of interest stored in it results = {'times': list_times, 'runtimes': list_runtime, 'Z': Z_all,", "Z_all[:N] Z_test = Z_all[N:] SGD_epochs = 1 clf = linear_model.SGDRegressor(alpha=delta, fit_intercept=False, n_iter=SGD_epochs) clf.fit(Z_train,", "weights should be saved :return: dictionary res containing all results \"\"\" N, D", "= C + 1 Z_all = scipy.sparse.csr_matrix((Z_all.data, Z_all.indices, Z_all.indptr), shape=(N_all, C + 2),", "loc_r)) feature_from_repetition.append(m) 
feature_from_repetition.append(m) C += 2 if mondrian_forest: # update Mondrian forest predictions", "+ delta / M * np.identity(len(active_features_in_tree[m])), np.transpose(Z_train).dot(y_train)) if weights_from_lifetime is not None and", "list_kernel_error_test = [] while len(events) > 0: (birth_time, m, c, dim, loc) =", "Z_test.dot(w_kernel) if validation: error_train, error_validation =\\ errors_regression(y, y_test[:(N_test/2)], y_hat_train, y_hat_test[:(N_test/2)]) error_train, error_test =\\", "X_test, y_test, M, lifetime_max, delta, validation=False, mondrian_kernel=False, mondrian_forest=False, weights_from_lifetime=None): \"\"\" Sweeps through Mondrian", "indices = range(M) * N_all data = np.ones(N_all * M) / np.sqrt(M) Z_all", "feature_r = (feature_data[c])[Xd > loc] feature_data.append(feature_l) feature_data.append(feature_r) active_features.remove(c) active_features_in_tree[m].remove(c) active_features.append(C + 0) active_features.append(C", "dictionary with all values of interest stored in it results = {'times': list_times,", "mondrian_kernel: w_kernel = np.append(w_kernel, [w_kernel[c], w_kernel[c]]) w_kernel[c] = 0 Z_train = Z_all[:N] Z_test", "Z_all[N:] SGD_epochs = 1 clf = linear_model.SGDRegressor(alpha=delta, fit_intercept=False, n_iter=SGD_epochs) clf.fit(Z_train, y_train, coef_init=w_kernel) w_kernel", "if mondrian_forest: if weights_from_lifetime is not None: results['w_forest'] = np.concatenate(w_trees) results['w_kernel'] = w_kernel_save", "[] list_kernel_error_test = [] while len(events) > 0: (birth_time, m, c, dim, loc)", "N_all data = np.ones(N_all * M) / np.sqrt(M) Z_all = scipy.sparse.csr_matrix((data, indices, indptr),", "evaluate_all_lifetimes(X, None, np.empty((0, X.shape[1])), None, M, lifetime, None) Z = np.sqrt(M) * res['Z']", "range(M): cut_time, dim, loc = sample_cut(lX, uX, 0.0) if cut_time < lifetime_max: heapq.heappush(events,", "Mondrian kernel predictions if mondrian_kernel: w_kernel = np.append(w_kernel, [w_kernel[c], 
w_kernel[c]]) w_kernel[c] = 0", "sample_cut, errors_regression def evaluate_all_lifetimes(X, y, X_test, y_test, M, lifetime_max, delta, validation=False, mondrian_kernel=False, mondrian_forest=False,", "M, lifetime_max, delta, validation=False, mondrian_kernel=False, mondrian_forest=False, weights_from_lifetime=None): \"\"\" Sweeps through Mondrian kernels with", "move datapoints from split feature to child features Z_all.indices[feature_l * M + m]", "should be created by halving the test set :param mondrian_kernel: flag indicating whether", "map with lifetime lifetime_max, to (2) find a suitable lifetime (inverse kernel width),", "+ 0) active_features_in_tree[m].append(C + 1) # move datapoints from split feature to child", "+= 2 if mondrian_forest: # update Mondrian forest predictions in tree m Z_train", "first cut in each tree feature_data = [np.array(range(N_all)) for _ in range(M)] lX", "if weights_from_lifetime is not None: results['w_forest'] = np.concatenate(w_trees) results['w_kernel'] = w_kernel_save results['forest_train'] =", "forest error y_hat_train = y_mean + np.mean(trees_y_hat_train, 1) y_hat_test = y_mean + np.mean(trees_y_hat_test,", "birth_time <= weights_from_lifetime: w_kernel_save = np.array(w_kernel[active_features]) y_hat_train = y_mean + Z_train.dot(w_kernel) y_hat_test =", "+ Z_train.dot(w_kernel) y_hat_test = y_mean + Z_test.dot(w_kernel) if validation: error_train, error_validation =\\ errors_regression(y,", "sample_cut(lX_l, uX_l, birth_time) lX_r = np.min(X_all[feature_r, :], axis=0) uX_r = np.max(X_all[feature_r, :], axis=0)", "mondrian_kernel: sys.stdout.write(\"\\n\") # this function returns a dictionary with all values of interest", "= np.squeeze(y) y_test = np.squeeze(y_test) # subtract target means y_mean = np.mean(y) y_train", "active_features_in_tree[m].append(m) # iterate through birth times in increasing order list_times = [] list_runtime", "= np.ones(N_all * M) / np.sqrt(M) Z_all = scipy.sparse.csr_matrix((data, indices, 
indptr), shape=(N_all, M))", "errors_regression(y, y_test[(N_test/2):], y_hat_train, y_hat_test[(N_test/2):]) list_kernel_error_validation.append(error_validation) else: error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test)", "axis=0) cut_time_r, dim_r, loc_r = sample_cut(lX_r, uX_r, birth_time) # add new cuts to", "save runtime list_runtime.append(time.clock() - time_start) if mondrian_kernel: # progress indicator in console sys.stdout.write(\"\\rTime:", "res = evaluate_all_lifetimes(X, None, np.empty((0, X.shape[1])), None, M, lifetime, None) Z = np.sqrt(M)", "- time_start) if mondrian_kernel: # progress indicator in console sys.stdout.write(\"\\rTime: %.2E / %.2E", "errors_regression(y, y_test, y_hat_train, y_hat_test) list_kernel_error_train.append(error_train) list_kernel_error_test.append(error_test) # save runtime list_runtime.append(time.clock() - time_start) if", "N_test if mondrian_forest or mondrian_kernel: y = np.squeeze(y) y_test = np.squeeze(y_test) # subtract", "initialize Mondrian tree predictions and squared errors trees_y_hat_test = np.zeros((N_test, M)) list_forest_error_train =", "active_features = [] active_features_in_tree = [[] for _ in range(M)] for m in", "+ 1, M) indices = range(M) * N_all data = np.ones(N_all * M)", "_ in range(M)] lX = np.min(X_all, 0) uX = np.max(X_all, 0) # event", "1) # move datapoints from split feature to child features Z_all.indices[feature_l * M", "new cuts to heap if cut_time_l < lifetime_max: heapq.heappush(events, (cut_time_l, m, C +", "lifetime_max]. 
This can be used to (1) construct a Mondrian feature map with", "in each tree feature_data = [np.array(range(N_all)) for _ in range(M)] lX = np.min(X_all,", "y_test, y_hat_train, y_hat_test) list_kernel_error_train.append(error_train) list_kernel_error_test.append(error_test) # save runtime list_runtime.append(time.clock() - time_start) if mondrian_kernel:", "m, dim, loc)) active_features.append(m) active_features_in_tree[m].append(m) # iterate through birth times in increasing order", "y_hat_test) list_kernel_error_train.append(error_train) list_kernel_error_test.append(error_test) # save runtime list_runtime.append(time.clock() - time_start) if mondrian_kernel: # progress", "birth_time) lX_r = np.min(X_all[feature_r, :], axis=0) uX_r = np.max(X_all[feature_r, :], axis=0) cut_time_r, dim_r,", "# initialize sparse feature matrix indptr = range(0, M * N_all + 1,", "test inputs :param y_test: test regression targets :param M: number of Mondrian trees", "dim_r, loc_r)) feature_from_repetition.append(m) feature_from_repetition.append(m) C += 2 if mondrian_forest: # update Mondrian forest", "error_train, error_validation =\\ errors_regression(y, y_test[:(N_test/2)], y_hat_train, y_hat_test[:(N_test/2)]) error_train, error_test =\\ errors_regression(y, y_test[(N_test/2):], y_hat_train,", "= [] active_features_in_tree = [[] for _ in range(M)] for m in range(M):", "range(M)] for m in range(M): cut_time, dim, loc = sample_cut(lX, uX, 0.0) if", "feature_from_repetition.append(m) feature_from_repetition.append(m) C += 2 if mondrian_forest: # update Mondrian forest predictions in", "datapoints from split feature to child features Z_all.indices[feature_l * M + m] =", "C + 1, dim_r, loc_r)) feature_from_repetition.append(m) feature_from_repetition.append(m) C += 2 if mondrian_forest: #", "dim] feature_l = (feature_data[c])[Xd <= loc] feature_r = (feature_data[c])[Xd > loc] feature_data.append(feature_l) feature_data.append(feature_r)", "+ np.mean(trees_y_hat_train, 1) y_hat_test 
= y_mean + np.mean(trees_y_hat_test, 1) error_train, error_test = errors_regression(y,", "if mondrian_forest: # update Mondrian forest predictions in tree m Z_train = Z_all[:N,", "learned weights should be saved :return: dictionary res containing all results \"\"\" N,", "child features Z_all.indices[feature_l * M + m] = C + 0 Z_all.indices[feature_r *", "# subtract target means y_mean = np.mean(y) y_train = y - y_mean #", "linear_model import sys import time from utils import sample_cut, errors_regression def evaluate_all_lifetimes(X, y,", "0, dim_l, loc_l)) if cut_time_r < lifetime_max: heapq.heappush(events, (cut_time_r, m, C + 1,", "which forest and kernel learned weights should be saved :return: dictionary res containing", "lifetime_max, to (2) find a suitable lifetime (inverse kernel width), or to (3)", "heapq import numpy as np import scipy.sparse from sklearn import linear_model import sys", "+ 0 Z_all.indices[feature_r * M + m] = C + 1 Z_all =", "# this function returns a dictionary with all values of interest stored in", "index of feature being split events = [] active_features = [] active_features_in_tree =", "compare Mondrian kernel to Mondrian forest across lifetimes. 
:param X: training inputs :param", "0) active_features_in_tree[m].append(C + 1) # move datapoints from split feature to child features", "delta / M * np.identity(len(active_features_in_tree[m])), np.transpose(Z_train).dot(y_train)) if weights_from_lifetime is not None and birth_time", "be used to (1) construct a Mondrian feature map with lifetime lifetime_max, to", "= clf.coef_ if weights_from_lifetime is not None and birth_time <= weights_from_lifetime: w_kernel_save =", "from utils import sample_cut, errors_regression def evaluate_all_lifetimes(X, y, X_test, y_test, M, lifetime_max, delta,", "results['kernel_train'] = list_kernel_error_train results['kernel_test'] = list_kernel_error_test if validation: results['kernel_validation'] = list_kernel_error_validation return results", "returns a dictionary with all values of interest stored in it results =", "list_kernel_error_validation = [] list_kernel_error_test = [] while len(events) > 0: (birth_time, m, c,", "def Mondrian_kernel_features(X, lifetime, M): res = evaluate_all_lifetimes(X, None, np.empty((0, X.shape[1])), None, M, lifetime,", "in it results = {'times': list_times, 'runtimes': list_runtime, 'Z': Z_all, 'feature_from_repetition': np.array(feature_from_repetition)} if", "import scipy.sparse from sklearn import linear_model import sys import time from utils import", "range(M)] lX = np.min(X_all, 0) uX = np.max(X_all, 0) # event = tuple", "N, D = np.shape(X) N_test = np.shape(X_test)[0] X_all = np.array(np.r_[X, X_test]) N_all =", "flag indicating whether mondrian forest should be evaluated :param weights_from_lifetime: lifetime at which", "list_forest_error_train = [] list_forest_error_test = [] if mondrian_kernel: w_kernel = np.zeros(M) w_kernel_save =", "weights_from_lifetime is not None and birth_time <= weights_from_lifetime: w_trees[m] = w_tree / np.sqrt(M)", "dim, loc = sample_cut(lX, uX, 0.0) if cut_time < lifetime_max: heapq.heappush(events, (cut_time, m,", "is not None and birth_time <= 
weights_from_lifetime: w_trees[m] = w_tree / np.sqrt(M) trees_y_hat_train[:,", "(feature_data[c])[Xd > loc] feature_data.append(feature_l) feature_data.append(feature_r) active_features.remove(c) active_features_in_tree[m].remove(c) active_features.append(C + 0) active_features.append(C + 1)", "y_hat_test = y_mean + np.mean(trees_y_hat_test, 1) error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test)", "created by halving the test set :param mondrian_kernel: flag indicating whether mondrian kernel", "errors trees_y_hat_test = np.zeros((N_test, M)) list_forest_error_train = [] list_forest_error_test = [] if mondrian_kernel:", "Mondrian forest across lifetimes. :param X: training inputs :param y: training regression targets", "= N + N_test if mondrian_forest or mondrian_kernel: y = np.squeeze(y) y_test =", "np.min(X_all, 0) uX = np.max(X_all, 0) # event = tuple (time, tree, feature,", "tuple (time, tree, feature, dim, loc), where feature is the index of feature", "errors_regression(y, y_test[:(N_test/2)], y_hat_train, y_hat_test[:(N_test/2)]) error_train, error_test =\\ errors_regression(y, y_test[(N_test/2):], y_hat_train, y_hat_test[(N_test/2):]) list_kernel_error_validation.append(error_validation) else:", "= np.array(w_kernel[active_features]) y_hat_train = y_mean + Z_train.dot(w_kernel) y_hat_test = y_mean + Z_test.dot(w_kernel) if", "m, C + 0, dim_l, loc_l)) if cut_time_r < lifetime_max: heapq.heappush(events, (cut_time_r, m,", "results['w_kernel'] = w_kernel_save results['forest_train'] = list_forest_error_train results['forest_test'] = list_forest_error_test if mondrian_kernel: results['kernel_train'] =", "or to (3) compare Mondrian kernel to Mondrian forest across lifetimes. 
:param X:", "w_kernel_save = np.array(w_kernel[active_features]) y_hat_train = y_mean + Z_train.dot(w_kernel) y_hat_test = y_mean + Z_test.dot(w_kernel)", "mondrian_kernel: w_kernel = np.zeros(M) w_kernel_save = np.zeros(M) list_kernel_error_train = [] if validation: list_kernel_error_validation", "y_hat_train = y_mean + Z_train.dot(w_kernel) y_hat_test = y_mean + Z_test.dot(w_kernel) if validation: error_train,", "for each child lX_l = np.min(X_all[feature_l, :], axis=0) uX_l = np.max(X_all[feature_l, :], axis=0)", "each child lX_l = np.min(X_all[feature_l, :], axis=0) uX_l = np.max(X_all[feature_l, :], axis=0) cut_time_l,", "feature_data.append(feature_l) feature_data.append(feature_r) active_features.remove(c) active_features_in_tree[m].remove(c) active_features.append(C + 0) active_features.append(C + 1) active_features_in_tree[m].append(C + 0)", "loc_r = sample_cut(lX_r, uX_r, birth_time) # add new cuts to heap if cut_time_l", "predictions and squared errors trees_y_hat_test = np.zeros((N_test, M)) list_forest_error_train = [] list_forest_error_test =", "= [] list_kernel_error_test = [] while len(events) > 0: (birth_time, m, c, dim,", "np.ones(N_all * M) / np.sqrt(M) Z_all = scipy.sparse.csr_matrix((data, indices, indptr), shape=(N_all, M)) feature_from_repetition", "should be saved :return: dictionary res containing all results \"\"\" N, D =", "= np.squeeze(Z_train.dot(w_tree)) trees_y_hat_test[:, m] = np.squeeze(Z_test.dot(w_tree)) # update Mondrian forest error y_hat_train =", "in range(M): cut_time, dim, loc = sample_cut(lX, uX, 0.0) if cut_time < lifetime_max:", "C + 2), copy=False) # sample the cut for each child lX_l =", "kernels with all lifetime in [0, lifetime_max]. This can be used to (1)", "Mondrian kernels with all lifetime in [0, lifetime_max]. 
This can be used to", "(birth_time, m, c, dim, loc) = heapq.heappop(events) list_times.append(birth_time) # construct new feature Xd", "to child features Z_all.indices[feature_l * M + m] = C + 0 Z_all.indices[feature_r", "increasing order list_times = [] list_runtime = [] if mondrian_forest: w_trees = [np.zeros(1)", "lifetime :param delta: ridge regression regularization hyperparameter :param validation: flag indicating whether a", "with all lifetime in [0, lifetime_max]. This can be used to (1) construct", "# initialize Mondrian tree predictions and squared errors trees_y_hat_test = np.zeros((N_test, M)) list_forest_error_train", "axis=0) uX_l = np.max(X_all[feature_l, :], axis=0) cut_time_l, dim_l, loc_l = sample_cut(lX_l, uX_l, birth_time)", "= w_kernel_save results['forest_train'] = list_forest_error_train results['forest_test'] = list_forest_error_test if mondrian_kernel: results['kernel_train'] = list_kernel_error_train", "lifetime_max, C, error_test)) sys.stdout.flush() if mondrian_kernel: sys.stdout.write(\"\\n\") # this function returns a dictionary", "weights_from_lifetime: lifetime at which forest and kernel learned weights should be saved :return:", "> loc] feature_data.append(feature_l) feature_data.append(feature_r) active_features.remove(c) active_features_in_tree[m].remove(c) active_features.append(C + 0) active_features.append(C + 1) active_features_in_tree[m].append(C", "if mondrian_kernel: sys.stdout.write(\"\\n\") # this function returns a dictionary with all values of", "* N_all data = np.ones(N_all * M) / np.sqrt(M) Z_all = scipy.sparse.csr_matrix((data, indices,", "stored in it results = {'times': list_times, 'runtimes': list_runtime, 'Z': Z_all, 'feature_from_repetition': np.array(feature_from_repetition)}", "[] while len(events) > 0: (birth_time, m, c, dim, loc) = heapq.heappop(events) list_times.append(birth_time)", "%d, test error = %.3f)\" % (birth_time, lifetime_max, C, error_test)) sys.stdout.flush() if mondrian_kernel:", 
"results['kernel_test'] = list_kernel_error_test if validation: results['kernel_validation'] = list_kernel_error_validation return results def Mondrian_kernel_features(X, lifetime,", "and squared errors trees_y_hat_test = np.zeros((N_test, M)) list_forest_error_train = [] list_forest_error_test = []", "validation: error_train, error_validation =\\ errors_regression(y, y_test[:(N_test/2)], y_hat_train, y_hat_test[:(N_test/2)]) error_train, error_test =\\ errors_regression(y, y_test[(N_test/2):],", ":], axis=0) cut_time_l, dim_l, loc_l = sample_cut(lX_l, uX_l, birth_time) lX_r = np.min(X_all[feature_r, :],", "mondrian_forest: if weights_from_lifetime is not None: results['w_forest'] = np.concatenate(w_trees) results['w_kernel'] = w_kernel_save results['forest_train']", "list_times = [] list_runtime = [] if mondrian_forest: w_trees = [np.zeros(1) for _", "validation set should be created by halving the test set :param mondrian_kernel: flag", "and kernel learned weights should be saved :return: dictionary res containing all results", "feature matrix indptr = range(0, M * N_all + 1, M) indices =", "Z_all.indices, Z_all.indptr), shape=(N_all, C + 2), copy=False) # sample the cut for each", "to sample first cut in each tree feature_data = [np.array(range(N_all)) for _ in", "trees_y_hat_test[:, m] = np.squeeze(Z_test.dot(w_tree)) # update Mondrian forest error y_hat_train = y_mean +", "flag indicating whether mondrian kernel should be evaluated :param mondrian_forest: flag indicating whether", "mondrian_kernel: results['kernel_train'] = list_kernel_error_train results['kernel_test'] = list_kernel_error_test if validation: results['kernel_validation'] = list_kernel_error_validation return", "len(events) > 0: (birth_time, m, c, dim, loc) = heapq.heappop(events) list_times.append(birth_time) # construct", "y = np.squeeze(y) y_test = np.squeeze(y_test) # subtract target means y_mean = np.mean(y)", "of Mondrian trees :param lifetime_max: terminal lifetime :param delta: ridge 
regression regularization hyperparameter", "[w_kernel[c], w_kernel[c]]) w_kernel[c] = 0 Z_train = Z_all[:N] Z_test = Z_all[N:] SGD_epochs =", "is not None and birth_time <= weights_from_lifetime: w_kernel_save = np.array(w_kernel[active_features]) y_hat_train = y_mean", "heapq.heappush(events, (cut_time_r, m, C + 1, dim_r, loc_r)) feature_from_repetition.append(m) feature_from_repetition.append(m) C += 2", "list_kernel_error_validation return results def Mondrian_kernel_features(X, lifetime, M): res = evaluate_all_lifetimes(X, None, np.empty((0, X.shape[1])),", "construct new feature Xd = X_all[feature_data[c], dim] feature_l = (feature_data[c])[Xd <= loc] feature_r", "[np.array(range(N_all)) for _ in range(M)] lX = np.min(X_all, 0) uX = np.max(X_all, 0)", "regularization hyperparameter :param validation: flag indicating whether a validation set should be created", "= sample_cut(lX, uX, 0.0) if cut_time < lifetime_max: heapq.heappush(events, (cut_time, m, m, dim,", "shape=(N_all, M)) feature_from_repetition = range(M) C = M # bounding box for all", "box for all datapoints used to sample first cut in each tree feature_data", "list_kernel_error_validation.append(error_validation) else: error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test) list_kernel_error_train.append(error_train) list_kernel_error_test.append(error_test) # save", "mondrian_kernel: # progress indicator in console sys.stdout.write(\"\\rTime: %.2E / %.2E (C = %d,", "is not None: results['w_forest'] = np.concatenate(w_trees) results['w_kernel'] = w_kernel_save results['forest_train'] = list_forest_error_train results['forest_test']", "= C + 0 Z_all.indices[feature_r * M + m] = C + 1", "fit_intercept=False, n_iter=SGD_epochs) clf.fit(Z_train, y_train, coef_init=w_kernel) w_kernel = clf.coef_ if weights_from_lifetime is not None", "as np import scipy.sparse from sklearn import linear_model import sys import time from", "Z_all[:N, active_features_in_tree[m]] Z_test = Z_all[N:, 
active_features_in_tree[m]] w_tree = np.linalg.solve(np.transpose(Z_train).dot(Z_train) + delta / M", "forest predictions in tree m Z_train = Z_all[:N, active_features_in_tree[m]] Z_test = Z_all[N:, active_features_in_tree[m]]", "whether mondrian forest should be evaluated :param weights_from_lifetime: lifetime at which forest and", "# move datapoints from split feature to child features Z_all.indices[feature_l * M +", "y_hat_train, y_hat_test[:(N_test/2)]) error_train, error_test =\\ errors_regression(y, y_test[(N_test/2):], y_hat_train, y_hat_test[(N_test/2):]) list_kernel_error_validation.append(error_validation) else: error_train, error_test", "sample the cut for each child lX_l = np.min(X_all[feature_l, :], axis=0) uX_l =", "forest should be evaluated :param weights_from_lifetime: lifetime at which forest and kernel learned", "sparse feature matrix indptr = range(0, M * N_all + 1, M) indices", "through birth times in increasing order list_times = [] list_runtime = [] if", "inputs :param y: training regression targets :param X_test: test inputs :param y_test: test", "M)) # initialize Mondrian tree predictions and squared errors trees_y_hat_test = np.zeros((N_test, M))", "= list_kernel_error_test if validation: results['kernel_validation'] = list_kernel_error_validation return results def Mondrian_kernel_features(X, lifetime, M):", "the index of feature being split events = [] active_features = [] active_features_in_tree", "y: training regression targets :param X_test: test inputs :param y_test: test regression targets", "m] = np.squeeze(Z_test.dot(w_tree)) # update Mondrian forest error y_hat_train = y_mean + np.mean(trees_y_hat_train,", "= list_kernel_error_train results['kernel_test'] = list_kernel_error_test if validation: results['kernel_validation'] = list_kernel_error_validation return results def", "tree m Z_train = Z_all[:N, active_features_in_tree[m]] Z_test = Z_all[N:, active_features_in_tree[m]] w_tree = 
np.linalg.solve(np.transpose(Z_train).dot(Z_train)", "None and birth_time <= weights_from_lifetime: w_trees[m] = w_tree / np.sqrt(M) trees_y_hat_train[:, m] =", "= y_mean + np.mean(trees_y_hat_train, 1) y_hat_test = y_mean + np.mean(trees_y_hat_test, 1) error_train, error_test", "feature is the index of feature being split events = [] active_features =", "= [] if validation: list_kernel_error_validation = [] list_kernel_error_test = [] while len(events) >", "np.zeros(M) w_kernel_save = np.zeros(M) list_kernel_error_train = [] if validation: list_kernel_error_validation = [] list_kernel_error_test", "This can be used to (1) construct a Mondrian feature map with lifetime", "results \"\"\" N, D = np.shape(X) N_test = np.shape(X_test)[0] X_all = np.array(np.r_[X, X_test])", "active_features_in_tree[m].remove(c) active_features.append(C + 0) active_features.append(C + 1) active_features_in_tree[m].append(C + 0) active_features_in_tree[m].append(C + 1)", ":param validation: flag indicating whether a validation set should be created by halving", "/ %.2E (C = %d, test error = %.3f)\" % (birth_time, lifetime_max, C,", "w_trees = [np.zeros(1) for _ in range(M)] trees_y_hat_train = np.zeros((N, M)) # initialize", "active_features.append(m) active_features_in_tree[m].append(m) # iterate through birth times in increasing order list_times = []", "%.3f)\" % (birth_time, lifetime_max, C, error_test)) sys.stdout.flush() if mondrian_kernel: sys.stdout.write(\"\\n\") # this function", "flag indicating whether a validation set should be created by halving the test", "import numpy as np import scipy.sparse from sklearn import linear_model import sys import", "* M + m] = C + 0 Z_all.indices[feature_r * M + m]", "Xd = X_all[feature_data[c], dim] feature_l = (feature_data[c])[Xd <= loc] feature_r = (feature_data[c])[Xd >", "y_test = np.squeeze(y_test) # subtract target means y_mean = np.mean(y) y_train = y", "if validation: results['kernel_validation'] = list_kernel_error_validation 
return results def Mondrian_kernel_features(X, lifetime, M): res =", "n_iter=SGD_epochs) clf.fit(Z_train, y_train, coef_init=w_kernel) w_kernel = clf.coef_ if weights_from_lifetime is not None and", "in tree m Z_train = Z_all[:N, active_features_in_tree[m]] Z_test = Z_all[N:, active_features_in_tree[m]] w_tree =", "active_features.append(C + 0) active_features.append(C + 1) active_features_in_tree[m].append(C + 0) active_features_in_tree[m].append(C + 1) #", "clf = linear_model.SGDRegressor(alpha=delta, fit_intercept=False, n_iter=SGD_epochs) clf.fit(Z_train, y_train, coef_init=w_kernel) w_kernel = clf.coef_ if weights_from_lifetime", "times in increasing order list_times = [] list_runtime = [] if mondrian_forest: w_trees", "m] = np.squeeze(Z_train.dot(w_tree)) trees_y_hat_test[:, m] = np.squeeze(Z_test.dot(w_tree)) # update Mondrian forest error y_hat_train", "set :param mondrian_kernel: flag indicating whether mondrian kernel should be evaluated :param mondrian_forest:", "is the index of feature being split events = [] active_features = []", "< lifetime_max: heapq.heappush(events, (cut_time_r, m, C + 1, dim_r, loc_r)) feature_from_repetition.append(m) feature_from_repetition.append(m) C", "list_forest_error_test if mondrian_kernel: results['kernel_train'] = list_kernel_error_train results['kernel_test'] = list_kernel_error_test if validation: results['kernel_validation'] =", "w_kernel = np.zeros(M) w_kernel_save = np.zeros(M) list_kernel_error_train = [] if validation: list_kernel_error_validation =", "active_features.append(C + 1) active_features_in_tree[m].append(C + 0) active_features_in_tree[m].append(C + 1) # move datapoints from", "= [] list_runtime = [] if mondrian_forest: w_trees = [np.zeros(1) for _ in", "not None: results['w_forest'] = np.concatenate(w_trees) results['w_kernel'] = w_kernel_save results['forest_train'] = list_forest_error_train results['forest_test'] =", "cuts to heap if cut_time_l < lifetime_max: heapq.heappush(events, (cut_time_l, m, C 
+ 0,", "evaluate_all_lifetimes(X, y, X_test, y_test, M, lifetime_max, delta, validation=False, mondrian_kernel=False, mondrian_forest=False, weights_from_lifetime=None): \"\"\" Sweeps", "training inputs :param y: training regression targets :param X_test: test inputs :param y_test:", "the test set :param mondrian_kernel: flag indicating whether mondrian kernel should be evaluated", "if mondrian_forest: w_trees = [np.zeros(1) for _ in range(M)] trees_y_hat_train = np.zeros((N, M))", "feature map with lifetime lifetime_max, to (2) find a suitable lifetime (inverse kernel", "forest across lifetimes. :param X: training inputs :param y: training regression targets :param", "from split feature to child features Z_all.indices[feature_l * M + m] = C", "if cut_time_r < lifetime_max: heapq.heappush(events, (cut_time_r, m, C + 1, dim_r, loc_r)) feature_from_repetition.append(m)", "+ np.mean(trees_y_hat_test, 1) error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test) list_forest_error_train.append(error_train) list_forest_error_test.append(error_test) #", "split feature to child features Z_all.indices[feature_l * M + m] = C +", "=\\ errors_regression(y, y_test[(N_test/2):], y_hat_train, y_hat_test[(N_test/2):]) list_kernel_error_validation.append(error_validation) else: error_train, error_test = errors_regression(y, y_test, y_hat_train,", "0) active_features.append(C + 1) active_features_in_tree[m].append(C + 0) active_features_in_tree[m].append(C + 1) # move datapoints", "indptr = range(0, M * N_all + 1, M) indices = range(M) *", "# update Mondrian forest predictions in tree m Z_train = Z_all[:N, active_features_in_tree[m]] Z_test", "dictionary res containing all results \"\"\" N, D = np.shape(X) N_test = np.shape(X_test)[0]", "y_train = y - y_mean # start timer time_start = time.clock() # initialize", "[] list_runtime = [] if mondrian_forest: w_trees = [np.zeros(1) for _ in range(M)]", "w_trees[m] = w_tree / np.sqrt(M) trees_y_hat_train[:, m] = 
np.squeeze(Z_train.dot(w_tree)) trees_y_hat_test[:, m] = np.squeeze(Z_test.dot(w_tree))", "should be evaluated :param weights_from_lifetime: lifetime at which forest and kernel learned weights", "if cut_time_l < lifetime_max: heapq.heappush(events, (cut_time_l, m, C + 0, dim_l, loc_l)) if", "lifetime_max: terminal lifetime :param delta: ridge regression regularization hyperparameter :param validation: flag indicating", "with all values of interest stored in it results = {'times': list_times, 'runtimes':", "sample_cut(lX, uX, 0.0) if cut_time < lifetime_max: heapq.heappush(events, (cut_time, m, m, dim, loc))", "# update Mondrian kernel predictions if mondrian_kernel: w_kernel = np.append(w_kernel, [w_kernel[c], w_kernel[c]]) w_kernel[c]", "= np.array(np.r_[X, X_test]) N_all = N + N_test if mondrian_forest or mondrian_kernel: y", "np.squeeze(y) y_test = np.squeeze(y_test) # subtract target means y_mean = np.mean(y) y_train =", "if validation: list_kernel_error_validation = [] list_kernel_error_test = [] while len(events) > 0: (birth_time,", "error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test) list_forest_error_train.append(error_train) list_forest_error_test.append(error_test) # update Mondrian kernel", "np.shape(X_test)[0] X_all = np.array(np.r_[X, X_test]) N_all = N + N_test if mondrian_forest or", "C = M # bounding box for all datapoints used to sample first", "(cut_time_l, m, C + 0, dim_l, loc_l)) if cut_time_r < lifetime_max: heapq.heappush(events, (cut_time_r,", "error_test =\\ errors_regression(y, y_test[(N_test/2):], y_hat_train, y_hat_test[(N_test/2):]) list_kernel_error_validation.append(error_validation) else: error_train, error_test = errors_regression(y, y_test,", "kernel to Mondrian forest across lifetimes. 
:param X: training inputs :param y: training", ":param M: number of Mondrian trees :param lifetime_max: terminal lifetime :param delta: ridge", "w_kernel = np.append(w_kernel, [w_kernel[c], w_kernel[c]]) w_kernel[c] = 0 Z_train = Z_all[:N] Z_test =", "birth_time) # add new cuts to heap if cut_time_l < lifetime_max: heapq.heappush(events, (cut_time_l,", "active_features.remove(c) active_features_in_tree[m].remove(c) active_features.append(C + 0) active_features.append(C + 1) active_features_in_tree[m].append(C + 0) active_features_in_tree[m].append(C +", "saved :return: dictionary res containing all results \"\"\" N, D = np.shape(X) N_test", "= np.min(X_all[feature_l, :], axis=0) uX_l = np.max(X_all[feature_l, :], axis=0) cut_time_l, dim_l, loc_l =", "active_features_in_tree[m].append(C + 1) # move datapoints from split feature to child features Z_all.indices[feature_l", "mondrian_forest or mondrian_kernel: y = np.squeeze(y) y_test = np.squeeze(y_test) # subtract target means", ":], axis=0) uX_r = np.max(X_all[feature_r, :], axis=0) cut_time_r, dim_r, loc_r = sample_cut(lX_r, uX_r,", "for _ in range(M)] for m in range(M): cut_time, dim, loc = sample_cut(lX,", "y_hat_test[(N_test/2):]) list_kernel_error_validation.append(error_validation) else: error_train, error_test = errors_regression(y, y_test, y_hat_train, y_hat_test) list_kernel_error_train.append(error_train) list_kernel_error_test.append(error_test) #", "validation: results['kernel_validation'] = list_kernel_error_validation return results def Mondrian_kernel_features(X, lifetime, M): res = evaluate_all_lifetimes(X,", "= M # bounding box for all datapoints used to sample first cut", "+ 1 Z_all = scipy.sparse.csr_matrix((Z_all.data, Z_all.indices, Z_all.indptr), shape=(N_all, C + 2), copy=False) #", ":param weights_from_lifetime: lifetime at which forest and kernel learned weights should be saved", "= sample_cut(lX_l, uX_l, birth_time) lX_r = np.min(X_all[feature_r, :], axis=0) uX_r = np.max(X_all[feature_r, 
:],", "<= loc] feature_r = (feature_data[c])[Xd > loc] feature_data.append(feature_l) feature_data.append(feature_r) active_features.remove(c) active_features_in_tree[m].remove(c) active_features.append(C +", "np.squeeze(Z_test.dot(w_tree)) # update Mondrian forest error y_hat_train = y_mean + np.mean(trees_y_hat_train, 1) y_hat_test", "w_kernel[c]]) w_kernel[c] = 0 Z_train = Z_all[:N] Z_test = Z_all[N:] SGD_epochs = 1", "Z_train = Z_all[:N] Z_test = Z_all[N:] SGD_epochs = 1 clf = linear_model.SGDRegressor(alpha=delta, fit_intercept=False,", "+ N_test if mondrian_forest or mondrian_kernel: y = np.squeeze(y) y_test = np.squeeze(y_test) #", "tree feature_data = [np.array(range(N_all)) for _ in range(M)] lX = np.min(X_all, 0) uX", "child lX_l = np.min(X_all[feature_l, :], axis=0) uX_l = np.max(X_all[feature_l, :], axis=0) cut_time_l, dim_l,", "width), or to (3) compare Mondrian kernel to Mondrian forest across lifetimes. :param", "sys.stdout.write(\"\\rTime: %.2E / %.2E (C = %d, test error = %.3f)\" % (birth_time,", "time from utils import sample_cut, errors_regression def evaluate_all_lifetimes(X, y, X_test, y_test, M, lifetime_max,", "split events = [] active_features = [] active_features_in_tree = [[] for _ in", "= np.mean(y) y_train = y - y_mean # start timer time_start = time.clock()", "# iterate through birth times in increasing order list_times = [] list_runtime =", "trees_y_hat_train = np.zeros((N, M)) # initialize Mondrian tree predictions and squared errors trees_y_hat_test", "iterate through birth times in increasing order list_times = [] list_runtime = []", "= scipy.sparse.csr_matrix((data, indices, indptr), shape=(N_all, M)) feature_from_repetition = range(M) C = M #" ]
[ "solutions part1, part2 = mod_solution.run(raw_data) print(f\"Part 1: {part1}\") print(f\"Part 2: {part2}\") return 0", ") parser.add_argument(\"day\", type=int, help=\"the day number\") parser.add_argument( \"-e\", \"--extra\", action=\"store_true\", help=\"run the alternative", "community-based solution\", ) args = parser.parse_args() # Read the input data data_file =", "from typing import cast from aoc2021.lib import ModSolution def main() -> int: \"\"\"Run", "command line application for running the advent of code solutions.\"\"\" import argparse import", "parser.add_argument( \"-e\", \"--extra\", action=\"store_true\", help=\"run the alternative community-based solution\", ) args = parser.parse_args()", "for running the advent of code solutions.\"\"\" import argparse import importlib import pathlib", "code puzzle solutions.\" ) parser.add_argument(\"day\", type=int, help=\"the day number\") parser.add_argument( \"-e\", \"--extra\", action=\"store_true\",", "running the advent of code solutions.\"\"\" import argparse import importlib import pathlib from", "mod_solution = cast(ModSolution, importlib.import_module(module)) except ModuleNotFoundError as exc: print(exc) return 1 # Get", "parser.add_argument(\"day\", type=int, help=\"the day number\") parser.add_argument( \"-e\", \"--extra\", action=\"store_true\", help=\"run the alternative community-based", "the alternative community-based solution\", ) args = parser.parse_args() # Read the input data", "day number\") parser.add_argument( \"-e\", \"--extra\", action=\"store_true\", help=\"run the alternative community-based solution\", ) args", "of code solutions.\"\"\" import argparse import importlib import pathlib from typing import cast", "import pathlib from typing import cast from aoc2021.lib import ModSolution def main() ->", "main() -> int: \"\"\"Run the main CLI entry point.\"\"\" parser = argparse.ArgumentParser( description=\"Run", "as fh: raw_data = fh.read() # Load the solution module if args.extra: 
submodule", "Load the solution module if args.extra: submodule = \"solutions_extra\" else: submodule = \"solutions\"", "help=\"run the alternative community-based solution\", ) args = parser.parse_args() # Read the input", "\"\"\"Run the main CLI entry point.\"\"\" parser = argparse.ArgumentParser( description=\"Run the advent of", "solutions.\" ) parser.add_argument(\"day\", type=int, help=\"the day number\") parser.add_argument( \"-e\", \"--extra\", action=\"store_true\", help=\"run the", "entry point.\"\"\" parser = argparse.ArgumentParser( description=\"Run the advent of code puzzle solutions.\" )", "fh.read() # Load the solution module if args.extra: submodule = \"solutions_extra\" else: submodule", "1 # Get the solutions part1, part2 = mod_solution.run(raw_data) print(f\"Part 1: {part1}\") print(f\"Part", "submodule = \"solutions\" module = f\"aoc2021.{submodule}.day{args.day:02d}\" try: mod_solution = cast(ModSolution, importlib.import_module(module)) except ModuleNotFoundError", "submodule = \"solutions_extra\" else: submodule = \"solutions\" module = f\"aoc2021.{submodule}.day{args.day:02d}\" try: mod_solution =", "if not data_file.exists(): print(f\"Input data file not found: {data_file}\") return 1 with data_file.open()", "point.\"\"\" parser = argparse.ArgumentParser( description=\"Run the advent of code puzzle solutions.\" ) parser.add_argument(\"day\",", "Read the input data data_file = pathlib.Path(f\"input/{args.day:02d}.txt\") if not data_file.exists(): print(f\"Input data file", "CLI entry point.\"\"\" parser = argparse.ArgumentParser( description=\"Run the advent of code puzzle solutions.\"", "\"\"\"The command line application for running the advent of code solutions.\"\"\" import argparse", "code solutions.\"\"\" import argparse import importlib import pathlib from typing import cast from", "puzzle solutions.\" ) parser.add_argument(\"day\", type=int, help=\"the day number\") parser.add_argument( \"-e\", \"--extra\", action=\"store_true\", 
help=\"run", "aoc2021.lib import ModSolution def main() -> int: \"\"\"Run the main CLI entry point.\"\"\"", "the main CLI entry point.\"\"\" parser = argparse.ArgumentParser( description=\"Run the advent of code", "import ModSolution def main() -> int: \"\"\"Run the main CLI entry point.\"\"\" parser", "number\") parser.add_argument( \"-e\", \"--extra\", action=\"store_true\", help=\"run the alternative community-based solution\", ) args =", "data_file = pathlib.Path(f\"input/{args.day:02d}.txt\") if not data_file.exists(): print(f\"Input data file not found: {data_file}\") return", "found: {data_file}\") return 1 with data_file.open() as fh: raw_data = fh.read() # Load", "module = f\"aoc2021.{submodule}.day{args.day:02d}\" try: mod_solution = cast(ModSolution, importlib.import_module(module)) except ModuleNotFoundError as exc: print(exc)", "parser = argparse.ArgumentParser( description=\"Run the advent of code puzzle solutions.\" ) parser.add_argument(\"day\", type=int,", "print(f\"Input data file not found: {data_file}\") return 1 with data_file.open() as fh: raw_data", "= pathlib.Path(f\"input/{args.day:02d}.txt\") if not data_file.exists(): print(f\"Input data file not found: {data_file}\") return 1", "type=int, help=\"the day number\") parser.add_argument( \"-e\", \"--extra\", action=\"store_true\", help=\"run the alternative community-based solution\",", "args = parser.parse_args() # Read the input data data_file = pathlib.Path(f\"input/{args.day:02d}.txt\") if not", "return 1 with data_file.open() as fh: raw_data = fh.read() # Load the solution", "int: \"\"\"Run the main CLI entry point.\"\"\" parser = argparse.ArgumentParser( description=\"Run the advent", "print(exc) return 1 # Get the solutions part1, part2 = mod_solution.run(raw_data) print(f\"Part 1:", "parser.parse_args() # Read the input data data_file = pathlib.Path(f\"input/{args.day:02d}.txt\") if not data_file.exists(): print(f\"Input", "argparse.ArgumentParser( description=\"Run the advent of 
code puzzle solutions.\" ) parser.add_argument(\"day\", type=int, help=\"the day", "main CLI entry point.\"\"\" parser = argparse.ArgumentParser( description=\"Run the advent of code puzzle", "advent of code solutions.\"\"\" import argparse import importlib import pathlib from typing import", "= \"solutions\" module = f\"aoc2021.{submodule}.day{args.day:02d}\" try: mod_solution = cast(ModSolution, importlib.import_module(module)) except ModuleNotFoundError as", "= f\"aoc2021.{submodule}.day{args.day:02d}\" try: mod_solution = cast(ModSolution, importlib.import_module(module)) except ModuleNotFoundError as exc: print(exc) return", "-> int: \"\"\"Run the main CLI entry point.\"\"\" parser = argparse.ArgumentParser( description=\"Run the", "import cast from aoc2021.lib import ModSolution def main() -> int: \"\"\"Run the main", "\"-e\", \"--extra\", action=\"store_true\", help=\"run the alternative community-based solution\", ) args = parser.parse_args() #", "except ModuleNotFoundError as exc: print(exc) return 1 # Get the solutions part1, part2", "module if args.extra: submodule = \"solutions_extra\" else: submodule = \"solutions\" module = f\"aoc2021.{submodule}.day{args.day:02d}\"", "= fh.read() # Load the solution module if args.extra: submodule = \"solutions_extra\" else:", ") args = parser.parse_args() # Read the input data data_file = pathlib.Path(f\"input/{args.day:02d}.txt\") if", "f\"aoc2021.{submodule}.day{args.day:02d}\" try: mod_solution = cast(ModSolution, importlib.import_module(module)) except ModuleNotFoundError as exc: print(exc) return 1", "importlib.import_module(module)) except ModuleNotFoundError as exc: print(exc) return 1 # Get the solutions part1,", "pathlib from typing import cast from aoc2021.lib import ModSolution def main() -> int:", "cast from aoc2021.lib import ModSolution def main() -> int: \"\"\"Run the main CLI", "the advent of code solutions.\"\"\" import argparse import importlib import pathlib from typing", "advent of code puzzle 
solutions.\" ) parser.add_argument(\"day\", type=int, help=\"the day number\") parser.add_argument( \"-e\",", "# Get the solutions part1, part2 = mod_solution.run(raw_data) print(f\"Part 1: {part1}\") print(f\"Part 2:", "Get the solutions part1, part2 = mod_solution.run(raw_data) print(f\"Part 1: {part1}\") print(f\"Part 2: {part2}\")", "import importlib import pathlib from typing import cast from aoc2021.lib import ModSolution def", "\"--extra\", action=\"store_true\", help=\"run the alternative community-based solution\", ) args = parser.parse_args() # Read", "{data_file}\") return 1 with data_file.open() as fh: raw_data = fh.read() # Load the", "not found: {data_file}\") return 1 with data_file.open() as fh: raw_data = fh.read() #", "raw_data = fh.read() # Load the solution module if args.extra: submodule = \"solutions_extra\"", "= cast(ModSolution, importlib.import_module(module)) except ModuleNotFoundError as exc: print(exc) return 1 # Get the", "alternative community-based solution\", ) args = parser.parse_args() # Read the input data data_file", "line application for running the advent of code solutions.\"\"\" import argparse import importlib", "the solutions part1, part2 = mod_solution.run(raw_data) print(f\"Part 1: {part1}\") print(f\"Part 2: {part2}\") return", "not data_file.exists(): print(f\"Input data file not found: {data_file}\") return 1 with data_file.open() as", "= argparse.ArgumentParser( description=\"Run the advent of code puzzle solutions.\" ) parser.add_argument(\"day\", type=int, help=\"the", "file not found: {data_file}\") return 1 with data_file.open() as fh: raw_data = fh.read()", "argparse import importlib import pathlib from typing import cast from aoc2021.lib import ModSolution", "pathlib.Path(f\"input/{args.day:02d}.txt\") if not data_file.exists(): print(f\"Input data file not found: {data_file}\") return 1 with", "solution module if args.extra: submodule = \"solutions_extra\" else: submodule = \"solutions\" module =", "else: 
submodule = \"solutions\" module = f\"aoc2021.{submodule}.day{args.day:02d}\" try: mod_solution = cast(ModSolution, importlib.import_module(module)) except", "help=\"the day number\") parser.add_argument( \"-e\", \"--extra\", action=\"store_true\", help=\"run the alternative community-based solution\", )", "solutions.\"\"\" import argparse import importlib import pathlib from typing import cast from aoc2021.lib", "ModuleNotFoundError as exc: print(exc) return 1 # Get the solutions part1, part2 =", "input data data_file = pathlib.Path(f\"input/{args.day:02d}.txt\") if not data_file.exists(): print(f\"Input data file not found:", "from aoc2021.lib import ModSolution def main() -> int: \"\"\"Run the main CLI entry", "def main() -> int: \"\"\"Run the main CLI entry point.\"\"\" parser = argparse.ArgumentParser(", "with data_file.open() as fh: raw_data = fh.read() # Load the solution module if", "data_file.open() as fh: raw_data = fh.read() # Load the solution module if args.extra:", "of code puzzle solutions.\" ) parser.add_argument(\"day\", type=int, help=\"the day number\") parser.add_argument( \"-e\", \"--extra\",", "data file not found: {data_file}\") return 1 with data_file.open() as fh: raw_data =", "\"solutions_extra\" else: submodule = \"solutions\" module = f\"aoc2021.{submodule}.day{args.day:02d}\" try: mod_solution = cast(ModSolution, importlib.import_module(module))", "fh: raw_data = fh.read() # Load the solution module if args.extra: submodule =", "as exc: print(exc) return 1 # Get the solutions part1, part2 = mod_solution.run(raw_data)", "action=\"store_true\", help=\"run the alternative community-based solution\", ) args = parser.parse_args() # Read the", "solution\", ) args = parser.parse_args() # Read the input data data_file = pathlib.Path(f\"input/{args.day:02d}.txt\")", "args.extra: submodule = \"solutions_extra\" else: submodule = \"solutions\" module = f\"aoc2021.{submodule}.day{args.day:02d}\" try: mod_solution", "1 with data_file.open() as fh: 
raw_data = fh.read() # Load the solution module", "the solution module if args.extra: submodule = \"solutions_extra\" else: submodule = \"solutions\" module", "typing import cast from aoc2021.lib import ModSolution def main() -> int: \"\"\"Run the", "exc: print(exc) return 1 # Get the solutions part1, part2 = mod_solution.run(raw_data) print(f\"Part", "# Load the solution module if args.extra: submodule = \"solutions_extra\" else: submodule =", "# Read the input data data_file = pathlib.Path(f\"input/{args.day:02d}.txt\") if not data_file.exists(): print(f\"Input data", "data_file.exists(): print(f\"Input data file not found: {data_file}\") return 1 with data_file.open() as fh:", "importlib import pathlib from typing import cast from aoc2021.lib import ModSolution def main()", "\"solutions\" module = f\"aoc2021.{submodule}.day{args.day:02d}\" try: mod_solution = cast(ModSolution, importlib.import_module(module)) except ModuleNotFoundError as exc:", "the input data data_file = pathlib.Path(f\"input/{args.day:02d}.txt\") if not data_file.exists(): print(f\"Input data file not", "data data_file = pathlib.Path(f\"input/{args.day:02d}.txt\") if not data_file.exists(): print(f\"Input data file not found: {data_file}\")", "import argparse import importlib import pathlib from typing import cast from aoc2021.lib import", "return 1 # Get the solutions part1, part2 = mod_solution.run(raw_data) print(f\"Part 1: {part1}\")", "try: mod_solution = cast(ModSolution, importlib.import_module(module)) except ModuleNotFoundError as exc: print(exc) return 1 #", "if args.extra: submodule = \"solutions_extra\" else: submodule = \"solutions\" module = f\"aoc2021.{submodule}.day{args.day:02d}\" try:", "application for running the advent of code solutions.\"\"\" import argparse import importlib import", "cast(ModSolution, importlib.import_module(module)) except ModuleNotFoundError as exc: print(exc) return 1 # Get the solutions", "the advent of code puzzle solutions.\" ) 
parser.add_argument(\"day\", type=int, help=\"the day number\") parser.add_argument(", "description=\"Run the advent of code puzzle solutions.\" ) parser.add_argument(\"day\", type=int, help=\"the day number\")", "ModSolution def main() -> int: \"\"\"Run the main CLI entry point.\"\"\" parser =", "= \"solutions_extra\" else: submodule = \"solutions\" module = f\"aoc2021.{submodule}.day{args.day:02d}\" try: mod_solution = cast(ModSolution,", "= parser.parse_args() # Read the input data data_file = pathlib.Path(f\"input/{args.day:02d}.txt\") if not data_file.exists():" ]
def dna_to_rna(sequence: str) -> str:
    """Return the RNA complement of a DNA *sequence*.

    Each base is mapped G->C, C->G, T->A, A->U. If the sequence contains
    any other character, the literal string "Invalid Input" is returned
    (matching the original script's behavior).
    """
    complement = {'G': 'C', 'C': 'G', 'T': 'A', 'A': 'U'}
    try:
        return ''.join(complement[base] for base in sequence)
    except KeyError:
        # A base outside {G, C, T, A} has no complement defined here.
        return "Invalid Input"


if __name__ == "__main__":
    # Read one sequence from stdin and print its complement.
    print(dna_to_rna(input()))
import csv  # NOTE(review): unused in this chunk — kept in case it is used elsewhere in the file
import logging
import random
from argparse import ArgumentParser

from irc import IRC
from irc.messages import IRCMessage
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

# Emojis sent in reaction to strongly positive messages
positives = [
    "(˶‾᷄ ⁻̫ ‾᷅˵)",
    "(っˆڡˆς)",
    "♥‿♥",
    "(づ。◕‿‿◕。)づ",
    "٩( ๑╹ ꇴ╹)۶",
    "ᕕ( ᐛ )ᕗ",
    "٩(^‿^)۶",
    "\(^O^)/"
]

# Emojis sent in reaction to strongly negative messages
negatives = [
    "(ノ ゜Д゜)ノ ︵ ┻━┻",
    "(;´༎ຶД༎ຶ`)",
    "( ͡° ʖ̯ ͡°)",
    "(ノಠ益ಠ)ノ彡┻━┻",
    "t(ಠ益ಠt)",
    "༼ ༎ຶ ෴ ༎ຶ༽",
    "┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻"
]


def main() -> None:
    """Main entrypoint of the bot."""
    # Configure the default logging format
    logging.basicConfig(
        format="[%(asctime)s] [%(levelname)-5s] %(message)s",
        level=logging.INFO,
        datefmt="%Y-%m-%d %H:%M:%S"
    )

    # Create an argument parser for parsing CLI arguments
    parser = ArgumentParser(description="An IRC bot providing sentiment analysis and reactions using ASCII emojis")

    # Add parameters for the server connection
    parser.add_argument("-s", "--server", required=True, type=str, help="The server to connect to")
    # Add optional parameters for the server connection
    # NOTE(review): type=bool is an argparse pitfall — any non-empty string
    # (including "False") parses as True. Kept as-is for CLI compatibility;
    # consider action="store_true"/"store_false" instead.
    parser.add_argument("-p", "--port", default=6697, type=int, help="The port to connect to")
    parser.add_argument("--use-tls", default=True, type=bool, help="Whether or not to use TLS")
    parser.add_argument("-t", "--timeout", default=300, type=float, help="Connection timeout in seconds")

    # Add optional parameters for authentication etc.
    parser.add_argument("-u", "--user", default="sentiment-bot", help="Username to use when connecting to the IRC server")
    parser.add_argument("-n", "--nick", default="sentiment-bot", help="Nick to use when connecting to the IRC server")
    parser.add_argument("-g", "--gecos", default="Sentiment Bot v1.0.2 (github.com/AlexGustafsson/irc-sentiment-bot)")
    parser.add_argument("-c", "--channel", required=True, action='append', help="Channel to join. May be used more than once")

    # Parse the arguments
    options = parser.parse_args()

    # Create an IRC connection
    irc = IRC(
        options.server,
        options.port,
        options.user,
        options.nick,
        timeout=options.timeout,
        use_tls=options.use_tls
    )
    irc.connect()

    # Connect to specified channels
    for channel in options.channel:
        irc.join(channel)

    # Construct the analyzer once instead of once per incoming message —
    # the original rebuilt it (lexicon load included) on every message.
    analyzer = SentimentIntensityAnalyzer()

    # The last analyzed result, retained so users can request a "debug"
    # breakdown of the most recent reaction
    lastMessageValence = None

    # Handle all messages
    for message in irc.messages:
        # Skip non-message events (joins, pings, etc.)
        if not isinstance(message, IRCMessage):
            continue

        # Reply to the author directly when addressed in private,
        # otherwise reply in the channel the message came from
        target = message.author if message.target == options.nick else message.target

        if message.message == "{}: help".format(options.nick):
            irc.send_message(target, "I perform a simple sentiment analysis on your messages and respond with emojis")
            irc.send_message(target, "You can debug the sentiment analysis of the last message like so:")
            irc.send_message(target, "{}: debug".format(options.nick))
        elif message.message == "{}: debug".format(options.nick):
            if lastMessageValence is not None:
                compound = "compound: {}".format(lastMessageValence["compound"])
                # NOTE(review): assumes the score dict carries a "debug" list of
                # (text, valence) pairs — confirm against the analyzer in use
                debug = ", ".join(["'{}': {}".format(text, valence) for text, valence in lastMessageValence["debug"]])
                irc.send_message(target, "{}. {}".format(compound, debug))
        else:
            # React only to strongly polarized messages (|compound| >= 0.6)
            scores = analyzer.polarity_scores(message.message)
            if scores["compound"] >= 0.6:
                irc.send_message(target, random.choice(positives))
                lastMessageValence = scores
            elif scores["compound"] <= -0.6:
                irc.send_message(target, random.choice(negatives))
                lastMessageValence = scores
{}\".format(compound, debug)) else: analyzer = SentimentIntensityAnalyzer() scores = analyzer.polarity_scores(message.message) if scores[\"compound\"] >=", "the default logging format logging.basicConfig( format=\"[%(asctime)s] [%(levelname)-5s] %(message)s\", level=logging.INFO, datefmt=\"%Y-%m-%d %H:%M:%S\" ) #", "\"(っˆڡˆς)\", \"♥‿♥\", \"(づ。◕‿‿◕。)づ\", \"٩( ๑╹ ꇴ╹)۶\", \"ᕕ( ᐛ )ᕗ\", \"٩(^‿^)۶\", \"\(^O^)/\" ] negatives", "\"(づ。◕‿‿◕。)づ\", \"٩( ๑╹ ꇴ╹)۶\", \"ᕕ( ᐛ )ᕗ\", \"٩(^‿^)۶\", \"\(^O^)/\" ] negatives = [", "message in irc.messages: if not isinstance(message, IRCMessage): continue target = message.author if message.target", "None: \"\"\"Main entrypoint of the bot.\"\"\" # Configure the default logging format logging.basicConfig(", "use_tls=options.use_tls ) irc.connect() # Connect to specified channels for channel in options.channel: irc.join(channel)", "def main() -> None: \"\"\"Main entrypoint of the bot.\"\"\" # Configure the default", "\"You can debug the sentiment analysis of the last message like so:\") irc.send_message(target,", "to connect to\") # Add optional parameters for the server connection parser.add_argument(\"-p\", \"--port\",", "message like so:\") irc.send_message(target, \"{}: debug\".format(options.nick)) elif message.message == \"{}: debug\".format(options.nick): if lastMessageValence", "v1.0.2 (github.com/AlexGustafsson/irc-sentiment-bot)\") parser.add_argument(\"-c\", \"--channel\", required=True, action='append', help=\"Channel to join. 
May be used more", "parser.add_argument(\"--use-tls\", default=True, type=bool, help=\"Whether or not to use TLS\") parser.add_argument(\"-t\", \"--timeout\", default=300, type=float,", "connecting to the IRC server\") parser.add_argument(\"-n\", \"--nick\", default=\"sentiment-bot\", help=\"Nick to use when connecting", "message.target == options.nick else message.target if message.message == \"{}: help\".format(options.nick): irc.send_message(target, \"I perform", "\"\"\"Main entrypoint of the bot.\"\"\" # Configure the default logging format logging.basicConfig( format=\"[%(asctime)s]", "0.6: irc.send_message(target, random.choice(positives)) lastMessageValence = scores elif scores[\"compound\"] <= -0.6: irc.send_message(target, random.choice(negatives)) lastMessageValence", "default=6697, type=int, help=\"The port to connect to\") parser.add_argument(\"--use-tls\", default=True, type=bool, help=\"Whether or not", "parser for parsing CLI arguments parser = ArgumentParser(description=\"An IRC bot providing sentiment analysis", "= [ \"(˶‾᷄ ⁻̫ ‾᷅˵)\", \"(っˆڡˆς)\", \"♥‿♥\", \"(づ。◕‿‿◕。)づ\", \"٩( ๑╹ ꇴ╹)۶\", \"ᕕ( ᐛ", "if message.target == options.nick else message.target if message.message == \"{}: help\".format(options.nick): irc.send_message(target, \"I", "Create an IRC connection irc = IRC( options.server, options.port, options.user, options.nick, timeout=options.timeout, use_tls=options.use_tls", "IRCMessage): continue target = message.author if message.target == options.nick else message.target if message.message", "# The last analyzed result lastMessageValence = None # Handle all messages for", "in irc.messages: if not isinstance(message, IRCMessage): continue target = message.author if message.target ==", "to the IRC server\") parser.add_argument(\"-n\", \"--nick\", default=\"sentiment-bot\", help=\"Nick to use when connecting to", "\"{}: debug\".format(options.nick): if lastMessageValence is not None: compound = \"compound: 
{}\".format(lastMessageValence[\"compound\"]) debug =", "default=300, type=float, help=\"Connection timeout in seconds\") # Add optional parameters for authentication etc.", "parsing CLI arguments parser = ArgumentParser(description=\"An IRC bot providing sentiment analysis and reactions", "\"(˶‾᷄ ⁻̫ ‾᷅˵)\", \"(っˆڡˆς)\", \"♥‿♥\", \"(づ。◕‿‿◕。)づ\", \"٩( ๑╹ ꇴ╹)۶\", \"ᕕ( ᐛ )ᕗ\", \"٩(^‿^)۶\",", "# Configure the default logging format logging.basicConfig( format=\"[%(asctime)s] [%(levelname)-5s] %(message)s\", level=logging.INFO, datefmt=\"%Y-%m-%d %H:%M:%S\"", "parameters for the server connection parser.add_argument(\"-s\", \"--server\", required=True, type=str, help=\"The server to connect", "message.message == \"{}: help\".format(options.nick): irc.send_message(target, \"I perform a simple sentiment analysis on your", "= \", \".join([\"'{}': {}\".format(text, valence) for text, valence in lastMessageValence[\"debug\"]]) irc.send_message(target, \"{}. {}\".format(compound,", "and reactions using ASCII emojis\") # Add parameters for the server connection parser.add_argument(\"-s\",", "message.author if message.target == options.nick else message.target if message.message == \"{}: help\".format(options.nick): irc.send_message(target,", "{}\".format(compound, debug)) else: analyzer = SentimentIntensityAnalyzer() scores = analyzer.polarity_scores(message.message) if scores[\"compound\"] >= 0.6:", "argument parser for parsing CLI arguments parser = ArgumentParser(description=\"An IRC bot providing sentiment", "of the last message like so:\") irc.send_message(target, \"{}: debug\".format(options.nick)) elif message.message == \"{}:", "help=\"Connection timeout in seconds\") # Add optional parameters for authentication etc. 
parser.add_argument(\"-u\", \"--user\",", "all messages for message in irc.messages: if not isinstance(message, IRCMessage): continue target =", "result lastMessageValence = None # Handle all messages for message in irc.messages: if", "scores[\"compound\"] >= 0.6: irc.send_message(target, random.choice(positives)) lastMessageValence = scores elif scores[\"compound\"] <= -0.6: irc.send_message(target,", ")ᕗ\", \"٩(^‿^)۶\", \"\(^O^)/\" ] negatives = [ \"(ノ ゜Д゜)ノ ︵ ┻━┻\", \"(;´༎ຶД༎ຶ`)\", \"(", "# Connect to specified channels for channel in options.channel: irc.join(channel) # The last", "if not isinstance(message, IRCMessage): continue target = message.author if message.target == options.nick else", "# Add optional parameters for the server connection parser.add_argument(\"-p\", \"--port\", default=6697, type=int, help=\"The", "import random from argparse import ArgumentParser from irc import IRC from irc.messages import", "providing sentiment analysis and reactions using ASCII emojis\") # Add parameters for the", "lastMessageValence is not None: compound = \"compound: {}\".format(lastMessageValence[\"compound\"]) debug = \", \".join([\"'{}': {}\".format(text,", ">= 0.6: irc.send_message(target, random.choice(positives)) lastMessageValence = scores elif scores[\"compound\"] <= -0.6: irc.send_message(target, random.choice(negatives))", "= parser.parse_args() # Create an IRC connection irc = IRC( options.server, options.port, options.user,", "ArgumentParser from irc import IRC from irc.messages import IRCMessage from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer", "# Create an argument parser for parsing CLI arguments parser = ArgumentParser(description=\"An IRC", "IRC( options.server, options.port, options.user, options.nick, timeout=options.timeout, use_tls=options.use_tls ) irc.connect() # Connect to specified", "be used more than once\") # Parse the arguments options = parser.parse_args() #", "Add optional parameters for the server connection 
parser.add_argument(\"-p\", \"--port\", default=6697, type=int, help=\"The port", "the server connection parser.add_argument(\"-p\", \"--port\", default=6697, type=int, help=\"The port to connect to\") parser.add_argument(\"--use-tls\",", "for the server connection parser.add_argument(\"-p\", \"--port\", default=6697, type=int, help=\"The port to connect to\")", "SentimentIntensityAnalyzer positives = [ \"(˶‾᷄ ⁻̫ ‾᷅˵)\", \"(っˆڡˆς)\", \"♥‿♥\", \"(づ。◕‿‿◕。)づ\", \"٩( ๑╹ ꇴ╹)۶\",", "elif message.message == \"{}: debug\".format(options.nick): if lastMessageValence is not None: compound = \"compound:", "= \"compound: {}\".format(lastMessageValence[\"compound\"]) debug = \", \".join([\"'{}': {}\".format(text, valence) for text, valence in", "from argparse import ArgumentParser from irc import IRC from irc.messages import IRCMessage from", "like so:\") irc.send_message(target, \"{}: debug\".format(options.nick)) elif message.message == \"{}: debug\".format(options.nick): if lastMessageValence is", "︵ヽ(`Д´)ノ︵ ┻━┻\" ] def main() -> None: \"\"\"Main entrypoint of the bot.\"\"\" #", "random from argparse import ArgumentParser from irc import IRC from irc.messages import IRCMessage", "%H:%M:%S\" ) # Create an argument parser for parsing CLI arguments parser =", "valence) for text, valence in lastMessageValence[\"debug\"]]) irc.send_message(target, \"{}. 
{}\".format(compound, debug)) else: analyzer =", "import logging import random from argparse import ArgumentParser from irc import IRC from", "‾᷅˵)\", \"(っˆڡˆς)\", \"♥‿♥\", \"(づ。◕‿‿◕。)づ\", \"٩( ๑╹ ꇴ╹)۶\", \"ᕕ( ᐛ )ᕗ\", \"٩(^‿^)۶\", \"\(^O^)/\" ]", "is not None: compound = \"compound: {}\".format(lastMessageValence[\"compound\"]) debug = \", \".join([\"'{}': {}\".format(text, valence)", "logging.basicConfig( format=\"[%(asctime)s] [%(levelname)-5s] %(message)s\", level=logging.INFO, datefmt=\"%Y-%m-%d %H:%M:%S\" ) # Create an argument parser", "\"٩(^‿^)۶\", \"\(^O^)/\" ] negatives = [ \"(ノ ゜Д゜)ノ ︵ ┻━┻\", \"(;´༎ຶД༎ຶ`)\", \"( ͡°", "] negatives = [ \"(ノ ゜Д゜)ノ ︵ ┻━┻\", \"(;´༎ຶД༎ຶ`)\", \"( ͡° ʖ̯ ͡°)\",", "parameters for authentication etc. parser.add_argument(\"-u\", \"--user\", default=\"sentiment-bot\", help=\"Username to use when connecting to", "an IRC connection irc = IRC( options.server, options.port, options.user, options.nick, timeout=options.timeout, use_tls=options.use_tls )", "help=\"Whether or not to use TLS\") parser.add_argument(\"-t\", \"--timeout\", default=300, type=float, help=\"Connection timeout in", "connection parser.add_argument(\"-s\", \"--server\", required=True, type=str, help=\"The server to connect to\") # Add optional", "{}\".format(text, valence) for text, valence in lastMessageValence[\"debug\"]]) irc.send_message(target, \"{}. {}\".format(compound, debug)) else: analyzer", "parser.add_argument(\"-t\", \"--timeout\", default=300, type=float, help=\"Connection timeout in seconds\") # Add optional parameters for", "sentiment analysis on your messages and respond with emojis\") irc.send_message(target, \"You can debug", "type=float, help=\"Connection timeout in seconds\") # Add optional parameters for authentication etc. 
parser.add_argument(\"-u\",", "parser.add_argument(\"-g\", \"--gecos\", default=\"Sentiment Bot v1.0.2 (github.com/AlexGustafsson/irc-sentiment-bot)\") parser.add_argument(\"-c\", \"--channel\", required=True, action='append', help=\"Channel to join.", "Connect to specified channels for channel in options.channel: irc.join(channel) # The last analyzed", "debug\".format(options.nick): if lastMessageValence is not None: compound = \"compound: {}\".format(lastMessageValence[\"compound\"]) debug = \",", "to join. May be used more than once\") # Parse the arguments options", "else message.target if message.message == \"{}: help\".format(options.nick): irc.send_message(target, \"I perform a simple sentiment", "channel in options.channel: irc.join(channel) # The last analyzed result lastMessageValence = None #", "valence in lastMessageValence[\"debug\"]]) irc.send_message(target, \"{}. {}\".format(compound, debug)) else: analyzer = SentimentIntensityAnalyzer() scores =", "# Add parameters for the server connection parser.add_argument(\"-s\", \"--server\", required=True, type=str, help=\"The server", "\"--nick\", default=\"sentiment-bot\", help=\"Nick to use when connecting to the IRC server\") parser.add_argument(\"-g\", \"--gecos\",", "and respond with emojis\") irc.send_message(target, \"You can debug the sentiment analysis of the", "logging import random from argparse import ArgumentParser from irc import IRC from irc.messages", "default=\"Sentiment Bot v1.0.2 (github.com/AlexGustafsson/irc-sentiment-bot)\") parser.add_argument(\"-c\", \"--channel\", required=True, action='append', help=\"Channel to join. 
May be", "default logging format logging.basicConfig( format=\"[%(asctime)s] [%(levelname)-5s] %(message)s\", level=logging.INFO, datefmt=\"%Y-%m-%d %H:%M:%S\" ) # Create", "help=\"Username to use when connecting to the IRC server\") parser.add_argument(\"-n\", \"--nick\", default=\"sentiment-bot\", help=\"Nick", "options.port, options.user, options.nick, timeout=options.timeout, use_tls=options.use_tls ) irc.connect() # Connect to specified channels for", "irc.connect() # Connect to specified channels for channel in options.channel: irc.join(channel) # The", "scores[\"compound\"] <= -0.6: irc.send_message(target, random.choice(negatives)) lastMessageValence = scores if __name__ == \"__main__\": main()", "the IRC server\") parser.add_argument(\"-g\", \"--gecos\", default=\"Sentiment Bot v1.0.2 (github.com/AlexGustafsson/irc-sentiment-bot)\") parser.add_argument(\"-c\", \"--channel\", required=True, action='append',", "analysis of the last message like so:\") irc.send_message(target, \"{}: debug\".format(options.nick)) elif message.message ==", "channels for channel in options.channel: irc.join(channel) # The last analyzed result lastMessageValence =", "༎ຶ༽\", \"┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻\" ] def main() -> None: \"\"\"Main entrypoint of the", "analyzer = SentimentIntensityAnalyzer() scores = analyzer.polarity_scores(message.message) if scores[\"compound\"] >= 0.6: irc.send_message(target, random.choice(positives)) lastMessageValence", "messages and respond with emojis\") irc.send_message(target, \"You can debug the sentiment analysis of", "parser.add_argument(\"-n\", \"--nick\", default=\"sentiment-bot\", help=\"Nick to use when connecting to the IRC server\") parser.add_argument(\"-g\",", "= [ \"(ノ ゜Д゜)ノ ︵ ┻━┻\", \"(;´༎ຶД༎ຶ`)\", \"( ͡° ʖ̯ ͡°)\", \"(ノಠ益ಠ)ノ彡┻━┻\", \"t(ಠ益ಠt)\",", "required=True, type=str, help=\"The server to connect to\") # Add optional parameters for the", "compound = \"compound: {}\".format(lastMessageValence[\"compound\"]) debug = \", \".join([\"'{}': 
{}\".format(text, valence) for text, valence", "ArgumentParser(description=\"An IRC bot providing sentiment analysis and reactions using ASCII emojis\") # Add", "bot.\"\"\" # Configure the default logging format logging.basicConfig( format=\"[%(asctime)s] [%(levelname)-5s] %(message)s\", level=logging.INFO, datefmt=\"%Y-%m-%d", "options = parser.parse_args() # Create an IRC connection irc = IRC( options.server, options.port,", "= None # Handle all messages for message in irc.messages: if not isinstance(message,", "\"{}: debug\".format(options.nick)) elif message.message == \"{}: debug\".format(options.nick): if lastMessageValence is not None: compound", "from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer positives = [ \"(˶‾᷄ ⁻̫ ‾᷅˵)\", \"(っˆڡˆς)\", \"♥‿♥\", \"(づ。◕‿‿◕。)づ\",", "etc. parser.add_argument(\"-u\", \"--user\", default=\"sentiment-bot\", help=\"Username to use when connecting to the IRC server\")", "\"٩( ๑╹ ꇴ╹)۶\", \"ᕕ( ᐛ )ᕗ\", \"٩(^‿^)۶\", \"\(^O^)/\" ] negatives = [ \"(ノ", "to use TLS\") parser.add_argument(\"-t\", \"--timeout\", default=300, type=float, help=\"Connection timeout in seconds\") # Add", "random.choice(positives)) lastMessageValence = scores elif scores[\"compound\"] <= -0.6: irc.send_message(target, random.choice(negatives)) lastMessageValence = scores", "゜Д゜)ノ ︵ ┻━┻\", \"(;´༎ຶД༎ຶ`)\", \"( ͡° ʖ̯ ͡°)\", \"(ノಠ益ಠ)ノ彡┻━┻\", \"t(ಠ益ಠt)\", \"༼ ༎ຶ ෴", "connecting to the IRC server\") parser.add_argument(\"-g\", \"--gecos\", default=\"Sentiment Bot v1.0.2 (github.com/AlexGustafsson/irc-sentiment-bot)\") parser.add_argument(\"-c\", \"--channel\",", "Configure the default logging format logging.basicConfig( format=\"[%(asctime)s] [%(levelname)-5s] %(message)s\", level=logging.INFO, datefmt=\"%Y-%m-%d %H:%M:%S\" )", "\"--gecos\", default=\"Sentiment Bot v1.0.2 (github.com/AlexGustafsson/irc-sentiment-bot)\") parser.add_argument(\"-c\", \"--channel\", required=True, action='append', help=\"Channel to join. 
May", "# Create an IRC connection irc = IRC( options.server, options.port, options.user, options.nick, timeout=options.timeout,", "\"{}: help\".format(options.nick): irc.send_message(target, \"I perform a simple sentiment analysis on your messages and", "the IRC server\") parser.add_argument(\"-n\", \"--nick\", default=\"sentiment-bot\", help=\"Nick to use when connecting to the", "Add optional parameters for authentication etc. parser.add_argument(\"-u\", \"--user\", default=\"sentiment-bot\", help=\"Username to use when", "message.message == \"{}: debug\".format(options.nick): if lastMessageValence is not None: compound = \"compound: {}\".format(lastMessageValence[\"compound\"])", "\"--port\", default=6697, type=int, help=\"The port to connect to\") parser.add_argument(\"--use-tls\", default=True, type=bool, help=\"Whether or", "\"༼ ༎ຶ ෴ ༎ຶ༽\", \"┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻\" ] def main() -> None: \"\"\"Main", "on your messages and respond with emojis\") irc.send_message(target, \"You can debug the sentiment", "arguments options = parser.parse_args() # Create an IRC connection irc = IRC( options.server,", "ʖ̯ ͡°)\", \"(ノಠ益ಠ)ノ彡┻━┻\", \"t(ಠ益ಠt)\", \"༼ ༎ຶ ෴ ༎ຶ༽\", \"┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻\" ] def", "the bot.\"\"\" # Configure the default logging format logging.basicConfig( format=\"[%(asctime)s] [%(levelname)-5s] %(message)s\", level=logging.INFO,", "isinstance(message, IRCMessage): continue target = message.author if message.target == options.nick else message.target if", "analysis on your messages and respond with emojis\") irc.send_message(target, \"You can debug the", "scores = analyzer.polarity_scores(message.message) if scores[\"compound\"] >= 0.6: irc.send_message(target, random.choice(positives)) lastMessageValence = scores elif", "] def main() -> None: \"\"\"Main entrypoint of the bot.\"\"\" # Configure the", "from irc.messages import IRCMessage from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer positives = [ \"(˶‾᷄ ⁻̫", "main() -> None: \"\"\"Main 
entrypoint of the bot.\"\"\" # Configure the default logging", "options.user, options.nick, timeout=options.timeout, use_tls=options.use_tls ) irc.connect() # Connect to specified channels for channel", "format logging.basicConfig( format=\"[%(asctime)s] [%(levelname)-5s] %(message)s\", level=logging.INFO, datefmt=\"%Y-%m-%d %H:%M:%S\" ) # Create an argument", "your messages and respond with emojis\") irc.send_message(target, \"You can debug the sentiment analysis", "connect to\") parser.add_argument(\"--use-tls\", default=True, type=bool, help=\"Whether or not to use TLS\") parser.add_argument(\"-t\", \"--timeout\",", "server to connect to\") # Add optional parameters for the server connection parser.add_argument(\"-p\",", "sentiment analysis of the last message like so:\") irc.send_message(target, \"{}: debug\".format(options.nick)) elif message.message", "once\") # Parse the arguments options = parser.parse_args() # Create an IRC connection", "irc.messages import IRCMessage from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer positives = [ \"(˶‾᷄ ⁻̫ ‾᷅˵)\",", "timeout in seconds\") # Add optional parameters for authentication etc. parser.add_argument(\"-u\", \"--user\", default=\"sentiment-bot\",", "\"( ͡° ʖ̯ ͡°)\", \"(ノಠ益ಠ)ノ彡┻━┻\", \"t(ಠ益ಠt)\", \"༼ ༎ຶ ෴ ༎ຶ༽\", \"┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻\"", "server\") parser.add_argument(\"-n\", \"--nick\", default=\"sentiment-bot\", help=\"Nick to use when connecting to the IRC server\")", "required=True, action='append', help=\"Channel to join. 
May be used more than once\") # Parse", "┻━┻\", \"(;´༎ຶД༎ຶ`)\", \"( ͡° ʖ̯ ͡°)\", \"(ノಠ益ಠ)ノ彡┻━┻\", \"t(ಠ益ಠt)\", \"༼ ༎ຶ ෴ ༎ຶ༽\", \"┻━┻", "to\") parser.add_argument(\"--use-tls\", default=True, type=bool, help=\"Whether or not to use TLS\") parser.add_argument(\"-t\", \"--timeout\", default=300,", "respond with emojis\") irc.send_message(target, \"You can debug the sentiment analysis of the last", "arguments parser = ArgumentParser(description=\"An IRC bot providing sentiment analysis and reactions using ASCII", "elif scores[\"compound\"] <= -0.6: irc.send_message(target, random.choice(negatives)) lastMessageValence = scores if __name__ == \"__main__\":", "port to connect to\") parser.add_argument(\"--use-tls\", default=True, type=bool, help=\"Whether or not to use TLS\")", "default=\"sentiment-bot\", help=\"Username to use when connecting to the IRC server\") parser.add_argument(\"-n\", \"--nick\", default=\"sentiment-bot\",", "simple sentiment analysis on your messages and respond with emojis\") irc.send_message(target, \"You can", "IRC from irc.messages import IRCMessage from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer positives = [ \"(˶‾᷄", "the server connection parser.add_argument(\"-s\", \"--server\", required=True, type=str, help=\"The server to connect to\") #", "None: compound = \"compound: {}\".format(lastMessageValence[\"compound\"]) debug = \", \".join([\"'{}': {}\".format(text, valence) for text,", "May be used more than once\") # Parse the arguments options = parser.parse_args()", "the sentiment analysis of the last message like so:\") irc.send_message(target, \"{}: debug\".format(options.nick)) elif", "\"I perform a simple sentiment analysis on your messages and respond with emojis\")", "use when connecting to the IRC server\") parser.add_argument(\"-n\", \"--nick\", default=\"sentiment-bot\", help=\"Nick to use" ]
[ "] inp = \"\".join(inp) align, err = proc.communicate(input=inp) return(align) def decompress(zipf, transl=True): \"\"\"", "IndexTensorDataset(data['X'], data['y']) # Test / train split dataset_size = len(dataset) indices = list(range(dataset_size))", "= 0 for z in zh.infolist(): if not z.is_dir(): print(z.filename) gz = zh.read(z.filename)", "transl=True): \"\"\" Translate and align pangenome cluster fasta file \"\"\" align_exe = MuscleCommandline(", "= len(dataset) indices = list(range(dataset_size)) split = int(np.floor(test_split * dataset_size)) if shuffle_dataset: np.random.seed(random_seed)", "train split dataset_size = len(dataset) indices = list(range(dataset_size)) split = int(np.floor(test_split * dataset_size))", "for v in y ]) # print(ylabels.shape) # print(X.shape) # print(isolates.shape) # print(isolates[0])", "'X': X_tensor, 'isolates': isolates}, f) def align(fh, transl=True): \"\"\" Translate and align pangenome", "= torch.from_numpy(X) torch.save({'y': y_tensor, 'X': X_tensor, 'isolates': isolates}, f) def align(fh, transl=True): \"\"\"", "as np import io import os import re import torch import torch.utils.data as", "but __getitem__ also returns indices as last value in tuple \"\"\" def __init__(self,", "= pattern.match(f, len(f)-6) d = m.group(1) # print(d) y = metadf[d] omit =", "torch.load(data_file) dataset = IndexTensorDataset(data['X'], data['y']) # Test / train split dataset_size = len(dataset)", "from Bio.SeqIO.FastaIO import FastaIterator, as_fasta from Bio.Align.Applications import MuscleCommandline class IndexTensorDataset: \"\"\" Identical", "= subprocess.Popen(str(align_exe), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=False) sequences = FastaIterator(fh) inp = [", "\"\"\" with zipfile.ZipFile(zipf, \"r\") as zh: i = 0 for z in zh.infolist():", "def transform(input, output): \"\"\"Snakemake function Split and transform input data \"\"\" genesdf =", "gzip.open(fh, 'rb') as 
gz: fn = gz.read() yield fn.decode('utf-8') if __name__ == \"__main__\":", "Initialize Dataloaders train_sampler = data_utils.SubsetRandomSampler(train_indices) test_sampler = data_utils.SubsetRandomSampler(test_indices) self.train_loader = data_utils.DataLoader(dataset, batch_size=batch_size, sampler=train_sampler)", "sequences = FastaIterator(fh) inp = [ \">\"+record.id+\"\\n\"+str(record.translate(table=\"Bacterial\").seq)+\"\\n\" for record in sequences ] inp", "for tensor in tensors) self.tensors = tensors def __getitem__(self, index): t = [tensor[index]", "{ 'S': 0, 'I': 0.5, 'R': 1 } pattern = re.compile(\"(\\w{3}).pt$\") for f", "pd.read_csv(input[0]) all_isolates = metadf[\"Isolate\"].to_numpy('U') encoding = { 'S': 0, 'I': 0.5, 'R': 1", "pd.isnull(y) isolates = all_isolates[~omit] y = y.loc[~omit] X = genesdf.loc[isolates].to_numpy() ylabels = np.array([", "metadf[\"Isolate\"].to_numpy('U') encoding = { 'S': 0, 'I': 0.5, 'R': 1 } pattern =", "stderr=subprocess.PIPE, universal_newlines=True, shell=False) sequences = FastaIterator(fh) inp = [ \">\"+record.id+\"\\n\"+str(record.translate(table=\"Bacterial\").seq)+\"\\n\" for record in", "indices as last value in tuple \"\"\" def __init__(self, *tensors): assert all(tensors[0].size(0) ==", "align_exe = MuscleCommandline( r'C:\\Users\\matthewwhiteside\\workspace\\b_ecoli\\muscle\\muscle3.8.31_i86win32.exe', clwstrict=True) # Align on stdin/stdout proc = subprocess.Popen(str(align_exe), stdin=subprocess.PIPE,", "Dataset and Dataloader objects needed for one experiment \"\"\" def __init__(self, data_file, batch_size,", "\"\"\" Decompress gzipped fasta files in zip archive \"\"\" with zipfile.ZipFile(zipf, \"r\") as", "stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=False) sequences = FastaIterator(fh) inp = [ \">\"+record.id+\"\\n\"+str(record.translate(table=\"Bacterial\").seq)+\"\\n\" for record", "__name__ == \"__main__\": for fn in 
decompress(\"data/raw/ecoli/pan_genome_sequences.zip\"): with io.StringIO(fn) as ifh: with open('data/tmp/test.aln',", "= genesdf.loc[isolates].to_numpy() ylabels = np.array([ encoding[v] for v in y ]) # print(ylabels.shape)", "pd.read_csv(input[1], index_col=0, header=0) metadf = pd.read_csv(input[0]) all_isolates = metadf[\"Isolate\"].to_numpy('U') encoding = { 'S':", "f in output: m = pattern.match(f, len(f)-6) d = m.group(1) # print(d) y", "re.compile(\"(\\w{3}).pt$\") for f in output: m = pattern.match(f, len(f)-6) d = m.group(1) #", "cluster fasta file \"\"\" align_exe = MuscleCommandline( r'C:\\Users\\matthewwhiteside\\workspace\\b_ecoli\\muscle\\muscle3.8.31_i86win32.exe', clwstrict=True) # Align on stdin/stdout", "isolates = all_isolates[~omit] y = y.loc[~omit] X = genesdf.loc[isolates].to_numpy() ylabels = np.array([ encoding[v]", "sampler=train_sampler) self.test_loader = data_utils.DataLoader(dataset, batch_size=batch_size, sampler=test_sampler) self.isolates = data['isolates'] def transform(input, output): \"\"\"Snakemake", "pattern.match(f, len(f)-6) d = m.group(1) # print(d) y = metadf[d] omit = pd.isnull(y)", "# print(d) y = metadf[d] omit = pd.isnull(y) isolates = all_isolates[~omit] y =", "function Split and transform input data \"\"\" genesdf = pd.read_csv(input[1], index_col=0, header=0) metadf", "io import os import re import torch import torch.utils.data as data_utils import subprocess", "err = proc.communicate(input=inp) return(align) def decompress(zipf, transl=True): \"\"\" Decompress gzipped fasta files in", "FastaIterator, as_fasta from Bio.Align.Applications import MuscleCommandline class IndexTensorDataset: \"\"\" Identical to torch.utils.data.Dataset.TensorDataset, but", "train_sampler = data_utils.SubsetRandomSampler(train_indices) test_sampler = data_utils.SubsetRandomSampler(test_indices) self.train_loader = data_utils.DataLoader(dataset, batch_size=batch_size, sampler=train_sampler) self.test_loader =", "for one experiment \"\"\" 
class GeneDataset:
    """Container object that provides access to the PyTorch Dataset and
    Dataloader objects needed for one experiment.
    """

    def __init__(self, data_file, batch_size, test_split, shuffle_dataset,
                 random_seed, validation_split=0):
        # NOTE(review): validation_split is currently unused; kept for
        # interface compatibility.

        # Load the tensor bundle saved by transform().
        bundle = torch.load(data_file)
        dataset = IndexTensorDataset(bundle['X'], bundle['y'])

        # Test / train split: first `n_test` (shuffled) indices go to test.
        n_examples = len(dataset)
        all_indices = list(range(n_examples))
        n_test = int(np.floor(test_split * n_examples))
        if shuffle_dataset:
            np.random.seed(random_seed)
            np.random.shuffle(all_indices)
        train_indices = all_indices[n_test:]
        test_indices = all_indices[:n_test]

        # Samplers restrict each loader to its index subset.
        train_sampler = data_utils.SubsetRandomSampler(train_indices)
        test_sampler = data_utils.SubsetRandomSampler(test_indices)
        self.train_loader = data_utils.DataLoader(
            dataset, batch_size=batch_size, sampler=train_sampler)
        self.test_loader = data_utils.DataLoader(
            dataset, batch_size=batch_size, sampler=test_sampler)
        self.isolates = bundle['isolates']
class IndexTensorDataset:
    """Identical to torch.utils.data.TensorDataset, but __getitem__ also
    returns the index as the last value in the tuple.
    """

    def __init__(self, *tensors):
        # All tensors must agree on the size of the first dimension.
        first_len = tensors[0].size(0)
        assert all(t.size(0) == first_len for t in tensors)
        self.tensors = tensors

    def __getitem__(self, index):
        row = [t[index] for t in self.tensors]
        row.append(index)
        return tuple(row)

    def __len__(self):
        return self.tensors[0].size(0)
def transform(input, output):
    """Snakemake function

    Split and transform input data.

    Args:
        input: input[0] is the metadata CSV (one 'Isolate' column plus one
            column per drug, values in {S, I, R} or empty); input[1] is the
            gene presence/absence CSV indexed by isolate.
        output: list of output file paths; each must end in '<drug>.pt'
            where <drug> is a three-character column name in the metadata.

    For each output file, isolates with a missing phenotype for that drug
    are dropped, phenotypes are encoded numerically (S=0, I=0.5, R=1), and
    the X/y tensors plus isolate names are saved with torch.save.
    """
    genesdf = pd.read_csv(input[1], index_col=0, header=0)
    metadf = pd.read_csv(input[0])
    all_isolates = metadf["Isolate"].to_numpy('U')
    encoding = { 'S': 0, 'I': 0.5, 'R': 1 }
    # Raw string with an escaped dot: the original "(\w{3}).pt$" let '.'
    # match any character; search() also replaces the brittle len(f)-6
    # offset passed to match().
    pattern = re.compile(r"(\w{3})\.pt$")
    for f in output:
        m = pattern.search(f)
        if m is None:
            # Fail loudly instead of AttributeError on m.group(1).
            raise ValueError("output filename %r does not end in '<drug>.pt'" % f)
        d = m.group(1)
        y = metadf[d]
        omit = pd.isnull(y)
        isolates = all_isolates[~omit]
        y = y.loc[~omit]
        X = genesdf.loc[isolates].to_numpy()
        ylabels = np.array([ encoding[v] for v in y ])
        y_tensor = torch.from_numpy(ylabels)
        X_tensor = torch.from_numpy(X)
        torch.save({'y': y_tensor, 'X': X_tensor, 'isolates': isolates}, f)
def align(fh, transl=True):
    """Translate and align pangenome cluster fasta file.

    Args:
        fh: file handle of nucleotide fasta records.
        transl: unused (kept for interface compatibility) -- records are
            always translated with the Bacterial codon table.

    Returns:
        The clustal-strict alignment text produced by MUSCLE.
    """
    # NOTE(review): hard-coded absolute Windows path to the MUSCLE binary;
    # consider making this configurable.
    align_exe = MuscleCommandline(
        r'C:\Users\matthewwhiteside\workspace\b_ecoli\muscle\muscle3.8.31_i86win32.exe',
        clwstrict=True)
    # Align on stdin/stdout
    proc = subprocess.Popen(str(align_exe), stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            universal_newlines=True, shell=False)
    sequences = FastaIterator(fh)
    # Translate each record to protein and re-emit it as fasta text.
    records = [ ">" + record.id + "\n"
                + str(record.translate(table="Bacterial").seq) + "\n"
                for record in sequences ]
    fasta_in = "".join(records)
    # Renamed local from 'align' to avoid shadowing this function's name.
    # NOTE(review): stderr and the exit status are ignored -- consider
    # checking proc.returncode.
    aligned, err = proc.communicate(input=fasta_in)
    return aligned
transl=True): \"\"\" Decompress", "clwstrict=True) # Align on stdin/stdout proc = subprocess.Popen(str(align_exe), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=False)", "y.loc[~omit] X = genesdf.loc[isolates].to_numpy() ylabels = np.array([ encoding[v] for v in y ])", "Dataloaders train_sampler = data_utils.SubsetRandomSampler(train_indices) test_sampler = data_utils.SubsetRandomSampler(test_indices) self.train_loader = data_utils.DataLoader(dataset, batch_size=batch_size, sampler=train_sampler) self.test_loader", "proc.communicate(input=inp) return(align) def decompress(zipf, transl=True): \"\"\" Decompress gzipped fasta files in zip archive", "archive \"\"\" with zipfile.ZipFile(zipf, \"r\") as zh: i = 0 for z in", "zlib from Bio import AlignIO from Bio.SeqIO.FastaIO import FastaIterator, as_fasta from Bio.Align.Applications import", "import zlib from Bio import AlignIO from Bio.SeqIO.FastaIO import FastaIterator, as_fasta from Bio.Align.Applications", "access to the PyTorch Dataset and Dataloader objects needed for one experiment \"\"\"", "transform input data \"\"\" genesdf = pd.read_csv(input[1], index_col=0, header=0) metadf = pd.read_csv(input[0]) all_isolates", "t = [tensor[index] for tensor in self.tensors] t.append(index) return(tuple(t)) def __len__(self): return self.tensors[0].size(0)", "zh.read(z.filename) fh = io.BytesIO(gz) with gzip.open(fh, 'rb') as gz: fn = gz.read() yield", "zip archive \"\"\" with zipfile.ZipFile(zipf, \"r\") as zh: i = 0 for z", "the PyTorch Dataset and Dataloader objects needed for one experiment \"\"\" def __init__(self,", "= MuscleCommandline( r'C:\\Users\\matthewwhiteside\\workspace\\b_ecoli\\muscle\\muscle3.8.31_i86win32.exe', clwstrict=True) # Align on stdin/stdout proc = subprocess.Popen(str(align_exe), stdin=subprocess.PIPE, stdout=subprocess.PIPE,", "genesdf = pd.read_csv(input[1], index_col=0, header=0) metadf = pd.read_csv(input[0]) all_isolates = 
metadf[\"Isolate\"].to_numpy('U') encoding =", "X_tensor, 'isolates': isolates}, f) def align(fh, transl=True): \"\"\" Translate and align pangenome cluster", "def align(fh, transl=True): \"\"\" Translate and align pangenome cluster fasta file \"\"\" align_exe", "batch_size=batch_size, sampler=train_sampler) self.test_loader = data_utils.DataLoader(dataset, batch_size=batch_size, sampler=test_sampler) self.isolates = data['isolates'] def transform(input, output):", "and Dataloader objects needed for one experiment \"\"\" def __init__(self, data_file, batch_size, test_split,", "= data_utils.DataLoader(dataset, batch_size=batch_size, sampler=test_sampler) self.isolates = data['isolates'] def transform(input, output): \"\"\"Snakemake function Split", "MuscleCommandline( r'C:\\Users\\matthewwhiteside\\workspace\\b_ecoli\\muscle\\muscle3.8.31_i86win32.exe', clwstrict=True) # Align on stdin/stdout proc = subprocess.Popen(str(align_exe), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,", "import torch import torch.utils.data as data_utils import subprocess import zipfile import zlib from", "gz = zh.read(z.filename) fh = io.BytesIO(gz) with gzip.open(fh, 'rb') as gz: fn =", "data_utils import subprocess import zipfile import zlib from Bio import AlignIO from Bio.SeqIO.FastaIO", "= gz.read() yield fn.decode('utf-8') if __name__ == \"__main__\": for fn in decompress(\"data/raw/ecoli/pan_genome_sequences.zip\"): with", "record in sequences ] inp = \"\".join(inp) align, err = proc.communicate(input=inp) return(align) def", "if not z.is_dir(): print(z.filename) gz = zh.read(z.filename) fh = io.BytesIO(gz) with gzip.open(fh, 'rb')", "y = y.loc[~omit] X = genesdf.loc[isolates].to_numpy() ylabels = np.array([ encoding[v] for v in", "one experiment \"\"\" def __init__(self, data_file, batch_size, test_split, shuffle_dataset, random_seed, validation_split=0): # Load", "subprocess import zipfile import zlib from Bio import AlignIO from Bio.SeqIO.FastaIO import 
FastaIterator,", "'R': 1 } pattern = re.compile(\"(\\w{3}).pt$\") for f in output: m = pattern.match(f,", "# Align on stdin/stdout proc = subprocess.Popen(str(align_exe), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=False) sequences", "not z.is_dir(): print(z.filename) gz = zh.read(z.filename) fh = io.BytesIO(gz) with gzip.open(fh, 'rb') as", "batch_size=batch_size, sampler=test_sampler) self.isolates = data['isolates'] def transform(input, output): \"\"\"Snakemake function Split and transform", "test_sampler = data_utils.SubsetRandomSampler(test_indices) self.train_loader = data_utils.DataLoader(dataset, batch_size=batch_size, sampler=train_sampler) self.test_loader = data_utils.DataLoader(dataset, batch_size=batch_size, sampler=test_sampler)", "encoding[v] for v in y ]) # print(ylabels.shape) # print(X.shape) # print(isolates.shape) #", "Test / train split dataset_size = len(dataset) indices = list(range(dataset_size)) split = int(np.floor(test_split", "gzip import pandas as pd import numpy as np import io import os", "with zipfile.ZipFile(zipf, \"r\") as zh: i = 0 for z in zh.infolist(): if", "__getitem__(self, index): t = [tensor[index] for tensor in self.tensors] t.append(index) return(tuple(t)) def __len__(self):", "universal_newlines=True, shell=False) sequences = FastaIterator(fh) inp = [ \">\"+record.id+\"\\n\"+str(record.translate(table=\"Bacterial\").seq)+\"\\n\" for record in sequences", "data_utils.DataLoader(dataset, batch_size=batch_size, sampler=test_sampler) self.isolates = data['isolates'] def transform(input, output): \"\"\"Snakemake function Split and", "indices[:split] # Initialize Dataloaders train_sampler = data_utils.SubsetRandomSampler(train_indices) test_sampler = data_utils.SubsetRandomSampler(test_indices) self.train_loader = data_utils.DataLoader(dataset,", "m.group(1) # print(d) y = metadf[d] omit = pd.isnull(y) isolates = all_isolates[~omit] y", "'isolates': isolates}, f) def 
align(fh, transl=True): \"\"\" Translate and align pangenome cluster fasta", "zipfile import zlib from Bio import AlignIO from Bio.SeqIO.FastaIO import FastaIterator, as_fasta from", "= indices[split:], indices[:split] # Initialize Dataloaders train_sampler = data_utils.SubsetRandomSampler(train_indices) test_sampler = data_utils.SubsetRandomSampler(test_indices) self.train_loader", "__init__(self, data_file, batch_size, test_split, shuffle_dataset, random_seed, validation_split=0): # Load tensor data data =", "= re.compile(\"(\\w{3}).pt$\") for f in output: m = pattern.match(f, len(f)-6) d = m.group(1)", "pd import numpy as np import io import os import re import torch", "\"\"\" genesdf = pd.read_csv(input[1], index_col=0, header=0) metadf = pd.read_csv(input[0]) all_isolates = metadf[\"Isolate\"].to_numpy('U') encoding", "d = m.group(1) # print(d) y = metadf[d] omit = pd.isnull(y) isolates =", "np.array([ encoding[v] for v in y ]) # print(ylabels.shape) # print(X.shape) # print(isolates.shape)", "files in zip archive \"\"\" with zipfile.ZipFile(zipf, \"r\") as zh: i = 0", "\"\"\" def __init__(self, data_file, batch_size, test_split, shuffle_dataset, random_seed, validation_split=0): # Load tensor data", "= y.loc[~omit] X = genesdf.loc[isolates].to_numpy() ylabels = np.array([ encoding[v] for v in y", "class IndexTensorDataset: \"\"\" Identical to torch.utils.data.Dataset.TensorDataset, but __getitem__ also returns indices as last", "zh: i = 0 for z in zh.infolist(): if not z.is_dir(): print(z.filename) gz", "yield fn.decode('utf-8') if __name__ == \"__main__\": for fn in decompress(\"data/raw/ecoli/pan_genome_sequences.zip\"): with io.StringIO(fn) as" ]
[ "not in jenkinsService: # Modify the file. jenkinsService = jenkinsService.replace(\"PATH=/bin:\", \"PATH=/usr/lib/jvm/java-11-graal/bin:/bin:\") with open(\"/etc/init.d/jenkins\",", "shutil from Step.Standard.DownloadArchive import downloadArchive from Step.Standard.DownloadFile import getDownloadLocation from Step.Standard.RunProcess import runProcess", "Installs Jenkins and a separate Java version. \"\"\" import os import shutil from", "if \"/usr/lib/jvm/java-11-graal\" not in jenkinsService: # Modify the file. jenkinsService = jenkinsService.replace(\"PATH=/bin:\", \"PATH=/usr/lib/jvm/java-11-graal/bin:/bin:\")", "/dev/null\") runProcess([\"apt\", \"update\"]) try: runProcess([\"apt\", \"install\", \"-y\", \"jenkins\"]) except Exception: # Ignore exceptions", "| sudo tee /etc/apt/sources.list.d/jenkins.list > /dev/null\") runProcess([\"apt\", \"update\"]) try: runProcess([\"apt\", \"install\", \"-y\", \"jenkins\"])", "downloadArchive from Step.Standard.DownloadFile import getDownloadLocation from Step.Standard.RunProcess import runProcess def installJenkins(): # Install", "\"w\") as file: file.write(jenkinsService) # Restart the service. runProcess([\"systemctl\", \"daemon-reload\"]) runProcess([\"systemctl\", \"restart\", \"jenkins\"])", "= file.read() if \"/usr/lib/jvm/java-11-graal\" not in jenkinsService: # Modify the file. jenkinsService =", "due to the wrong java version being used. pass # Set up GraalVM.", "# Install Jenkins. if not os.path.exists(\"/etc/init.d/jenkins\"): runProcess([\"apt\", \"install\", \"-y\", \"curl\"]) os.system(\"curl -fsSL https://pkg.jenkins.io/debian-stable/jenkins.io.key", "Modify the file. jenkinsService = jenkinsService.replace(\"PATH=/bin:\", \"PATH=/usr/lib/jvm/java-11-graal/bin:/bin:\") with open(\"/etc/init.d/jenkins\", \"w\") as file: file.write(jenkinsService)", "file path and restart it. with open(\"/etc/init.d/jenkins\") as file: jenkinsService = file.read() if", "and restart it. 
with open(\"/etc/init.d/jenkins\") as file: jenkinsService = file.read() if \"/usr/lib/jvm/java-11-graal\" not", "downloadArchive(\"https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.3.0/graalvm-ce-java11-linux-amd64-21.3.0.tar.gz\", \"tar.gz\", \"graalvm-11\") graalDownloadLocation = getDownloadLocation(\"graalvm-11\") graalDownloadLocation = os.path.join(graalDownloadLocation, os.listdir(graalDownloadLocation)[0]) shutil.copytree(graalDownloadLocation, \"/usr/lib/jvm/java-11-graal\") #", "def installJenkins(): # Install Jenkins. if not os.path.exists(\"/etc/init.d/jenkins\"): runProcess([\"apt\", \"install\", \"-y\", \"curl\"]) os.system(\"curl", "version. \"\"\" import os import shutil from Step.Standard.DownloadArchive import downloadArchive from Step.Standard.DownloadFile import", "\"curl\"]) os.system(\"curl -fsSL https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo tee /usr/share/keyrings/jenkins-keyring.asc > /dev/null\") os.system(\"echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc]", "tee /usr/share/keyrings/jenkins-keyring.asc > /dev/null\") os.system(\"echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] https://pkg.jenkins.io/debian-stable binary/ | sudo tee /etc/apt/sources.list.d/jenkins.list", "open(\"/etc/init.d/jenkins\", \"w\") as file: file.write(jenkinsService) # Restart the service. runProcess([\"systemctl\", \"daemon-reload\"]) runProcess([\"systemctl\", \"restart\",", "\"install\", \"-y\", \"jenkins\"]) except Exception: # Ignore exceptions that are likely due to", "path and restart it. with open(\"/etc/init.d/jenkins\") as file: jenkinsService = file.read() if \"/usr/lib/jvm/java-11-graal\"", "> /dev/null\") os.system(\"echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] https://pkg.jenkins.io/debian-stable binary/ | sudo tee /etc/apt/sources.list.d/jenkins.list > /dev/null\")", "in jenkinsService: # Modify the file. 
jenkinsService = jenkinsService.replace(\"PATH=/bin:\", \"PATH=/usr/lib/jvm/java-11-graal/bin:/bin:\") with open(\"/etc/init.d/jenkins\", \"w\")", "os.path.exists(\"/usr/lib/jvm/java-11-graal\"): downloadArchive(\"https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.3.0/graalvm-ce-java11-linux-amd64-21.3.0.tar.gz\", \"tar.gz\", \"graalvm-11\") graalDownloadLocation = getDownloadLocation(\"graalvm-11\") graalDownloadLocation = os.path.join(graalDownloadLocation, os.listdir(graalDownloadLocation)[0]) shutil.copytree(graalDownloadLocation, \"/usr/lib/jvm/java-11-graal\")", "if not os.path.exists(\"/etc/init.d/jenkins\"): runProcess([\"apt\", \"install\", \"-y\", \"curl\"]) os.system(\"curl -fsSL https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo tee", "used. pass # Set up GraalVM. # Jenkins requires Java 11, while other", "Step.Standard.DownloadFile import getDownloadLocation from Step.Standard.RunProcess import runProcess def installJenkins(): # Install Jenkins. if", "restart it. with open(\"/etc/init.d/jenkins\") as file: jenkinsService = file.read() if \"/usr/lib/jvm/java-11-graal\" not in", "https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo tee /usr/share/keyrings/jenkins-keyring.asc > /dev/null\") os.system(\"echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] https://pkg.jenkins.io/debian-stable binary/ |", "open(\"/etc/init.d/jenkins\") as file: jenkinsService = file.read() if \"/usr/lib/jvm/java-11-graal\" not in jenkinsService: # Modify", "os.path.join(graalDownloadLocation, os.listdir(graalDownloadLocation)[0]) shutil.copytree(graalDownloadLocation, \"/usr/lib/jvm/java-11-graal\") # Add GraalVM to the Jenkins service file path", "file.read() if \"/usr/lib/jvm/java-11-graal\" not in jenkinsService: # Modify the file. jenkinsService = jenkinsService.replace(\"PATH=/bin:\",", "Jenkins requires Java 11, while other programs require newer versions. 
if not os.path.exists(\"/usr/lib/jvm/java-11-graal\"):", "exceptions that are likely due to the wrong java version being used. pass", "os.listdir(graalDownloadLocation)[0]) shutil.copytree(graalDownloadLocation, \"/usr/lib/jvm/java-11-graal\") # Add GraalVM to the Jenkins service file path and", "jenkinsService = file.read() if \"/usr/lib/jvm/java-11-graal\" not in jenkinsService: # Modify the file. jenkinsService", "as file: jenkinsService = file.read() if \"/usr/lib/jvm/java-11-graal\" not in jenkinsService: # Modify the", "jenkinsService: # Modify the file. jenkinsService = jenkinsService.replace(\"PATH=/bin:\", \"PATH=/usr/lib/jvm/java-11-graal/bin:/bin:\") with open(\"/etc/init.d/jenkins\", \"w\") as", "import os import shutil from Step.Standard.DownloadArchive import downloadArchive from Step.Standard.DownloadFile import getDownloadLocation from", "deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] https://pkg.jenkins.io/debian-stable binary/ | sudo tee /etc/apt/sources.list.d/jenkins.list > /dev/null\") runProcess([\"apt\", \"update\"]) try:", "binary/ | sudo tee /etc/apt/sources.list.d/jenkins.list > /dev/null\") runProcess([\"apt\", \"update\"]) try: runProcess([\"apt\", \"install\", \"-y\",", "jenkinsService = jenkinsService.replace(\"PATH=/bin:\", \"PATH=/usr/lib/jvm/java-11-graal/bin:/bin:\") with open(\"/etc/init.d/jenkins\", \"w\") as file: file.write(jenkinsService) # Restart the", "# Jenkins requires Java 11, while other programs require newer versions. if not", "\"-y\", \"jenkins\"]) except Exception: # Ignore exceptions that are likely due to the", "newer versions. 
if not os.path.exists(\"/usr/lib/jvm/java-11-graal\"): downloadArchive(\"https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.3.0/graalvm-ce-java11-linux-amd64-21.3.0.tar.gz\", \"tar.gz\", \"graalvm-11\") graalDownloadLocation = getDownloadLocation(\"graalvm-11\") graalDownloadLocation =", "\"-y\", \"curl\"]) os.system(\"curl -fsSL https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo tee /usr/share/keyrings/jenkins-keyring.asc > /dev/null\") os.system(\"echo deb", "are likely due to the wrong java version being used. pass # Set", "# Ignore exceptions that are likely due to the wrong java version being", "graalDownloadLocation = os.path.join(graalDownloadLocation, os.listdir(graalDownloadLocation)[0]) shutil.copytree(graalDownloadLocation, \"/usr/lib/jvm/java-11-graal\") # Add GraalVM to the Jenkins service", "if not os.path.exists(\"/usr/lib/jvm/java-11-graal\"): downloadArchive(\"https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.3.0/graalvm-ce-java11-linux-amd64-21.3.0.tar.gz\", \"tar.gz\", \"graalvm-11\") graalDownloadLocation = getDownloadLocation(\"graalvm-11\") graalDownloadLocation = os.path.join(graalDownloadLocation, os.listdir(graalDownloadLocation)[0])", "\"\"\" import os import shutil from Step.Standard.DownloadArchive import downloadArchive from Step.Standard.DownloadFile import getDownloadLocation", "# Set up GraalVM. # Jenkins requires Java 11, while other programs require", "up GraalVM. # Jenkins requires Java 11, while other programs require newer versions.", "\"jenkins\"]) except Exception: # Ignore exceptions that are likely due to the wrong", "from Step.Standard.DownloadFile import getDownloadLocation from Step.Standard.RunProcess import runProcess def installJenkins(): # Install Jenkins.", "to the wrong java version being used. pass # Set up GraalVM. #", "while other programs require newer versions. 
if not os.path.exists(\"/usr/lib/jvm/java-11-graal\"): downloadArchive(\"https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.3.0/graalvm-ce-java11-linux-amd64-21.3.0.tar.gz\", \"tar.gz\", \"graalvm-11\") graalDownloadLocation", "separate Java version. \"\"\" import os import shutil from Step.Standard.DownloadArchive import downloadArchive from", "os.system(\"curl -fsSL https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo tee /usr/share/keyrings/jenkins-keyring.asc > /dev/null\") os.system(\"echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] https://pkg.jenkins.io/debian-stable", "with open(\"/etc/init.d/jenkins\", \"w\") as file: file.write(jenkinsService) # Restart the service. runProcess([\"systemctl\", \"daemon-reload\"]) runProcess([\"systemctl\",", "likely due to the wrong java version being used. pass # Set up", "it. with open(\"/etc/init.d/jenkins\") as file: jenkinsService = file.read() if \"/usr/lib/jvm/java-11-graal\" not in jenkinsService:", "-fsSL https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo tee /usr/share/keyrings/jenkins-keyring.asc > /dev/null\") os.system(\"echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] https://pkg.jenkins.io/debian-stable binary/", "service file path and restart it. 
with open(\"/etc/init.d/jenkins\") as file: jenkinsService = file.read()", "tee /etc/apt/sources.list.d/jenkins.list > /dev/null\") runProcess([\"apt\", \"update\"]) try: runProcess([\"apt\", \"install\", \"-y\", \"jenkins\"]) except Exception:", "https://pkg.jenkins.io/debian-stable binary/ | sudo tee /etc/apt/sources.list.d/jenkins.list > /dev/null\") runProcess([\"apt\", \"update\"]) try: runProcess([\"apt\", \"install\",", "getDownloadLocation(\"graalvm-11\") graalDownloadLocation = os.path.join(graalDownloadLocation, os.listdir(graalDownloadLocation)[0]) shutil.copytree(graalDownloadLocation, \"/usr/lib/jvm/java-11-graal\") # Add GraalVM to the Jenkins", "Add GraalVM to the Jenkins service file path and restart it. with open(\"/etc/init.d/jenkins\")", "Jenkins service file path and restart it. with open(\"/etc/init.d/jenkins\") as file: jenkinsService =", "sudo tee /usr/share/keyrings/jenkins-keyring.asc > /dev/null\") os.system(\"echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] https://pkg.jenkins.io/debian-stable binary/ | sudo tee", "Step.Standard.DownloadArchive import downloadArchive from Step.Standard.DownloadFile import getDownloadLocation from Step.Standard.RunProcess import runProcess def installJenkins():", "other programs require newer versions. if not os.path.exists(\"/usr/lib/jvm/java-11-graal\"): downloadArchive(\"https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.3.0/graalvm-ce-java11-linux-amd64-21.3.0.tar.gz\", \"tar.gz\", \"graalvm-11\") graalDownloadLocation =", "\"tar.gz\", \"graalvm-11\") graalDownloadLocation = getDownloadLocation(\"graalvm-11\") graalDownloadLocation = os.path.join(graalDownloadLocation, os.listdir(graalDownloadLocation)[0]) shutil.copytree(graalDownloadLocation, \"/usr/lib/jvm/java-11-graal\") # Add", "import shutil from Step.Standard.DownloadArchive import downloadArchive from Step.Standard.DownloadFile import getDownloadLocation from Step.Standard.RunProcess import", "Jenkins. 
if not os.path.exists(\"/etc/init.d/jenkins\"): runProcess([\"apt\", \"install\", \"-y\", \"curl\"]) os.system(\"curl -fsSL https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo", "being used. pass # Set up GraalVM. # Jenkins requires Java 11, while", "getDownloadLocation from Step.Standard.RunProcess import runProcess def installJenkins(): # Install Jenkins. if not os.path.exists(\"/etc/init.d/jenkins\"):", "/dev/null\") os.system(\"echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] https://pkg.jenkins.io/debian-stable binary/ | sudo tee /etc/apt/sources.list.d/jenkins.list > /dev/null\") runProcess([\"apt\",", "import runProcess def installJenkins(): # Install Jenkins. if not os.path.exists(\"/etc/init.d/jenkins\"): runProcess([\"apt\", \"install\", \"-y\",", "Exception: # Ignore exceptions that are likely due to the wrong java version", "installJenkins(): # Install Jenkins. if not os.path.exists(\"/etc/init.d/jenkins\"): runProcess([\"apt\", \"install\", \"-y\", \"curl\"]) os.system(\"curl -fsSL", "that are likely due to the wrong java version being used. pass #", "a separate Java version. \"\"\" import os import shutil from Step.Standard.DownloadArchive import downloadArchive", "runProcess def installJenkins(): # Install Jenkins. if not os.path.exists(\"/etc/init.d/jenkins\"): runProcess([\"apt\", \"install\", \"-y\", \"curl\"])", "| sudo tee /usr/share/keyrings/jenkins-keyring.asc > /dev/null\") os.system(\"echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] https://pkg.jenkins.io/debian-stable binary/ | sudo", "Jenkins and a separate Java version. 
\"\"\" import os import shutil from Step.Standard.DownloadArchive", "try: runProcess([\"apt\", \"install\", \"-y\", \"jenkins\"]) except Exception: # Ignore exceptions that are likely", "file: jenkinsService = file.read() if \"/usr/lib/jvm/java-11-graal\" not in jenkinsService: # Modify the file.", "os.path.exists(\"/etc/init.d/jenkins\"): runProcess([\"apt\", \"install\", \"-y\", \"curl\"]) os.system(\"curl -fsSL https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo tee /usr/share/keyrings/jenkins-keyring.asc >", "except Exception: # Ignore exceptions that are likely due to the wrong java", "Java version. \"\"\" import os import shutil from Step.Standard.DownloadArchive import downloadArchive from Step.Standard.DownloadFile", "wrong java version being used. pass # Set up GraalVM. # Jenkins requires", "jenkinsService.replace(\"PATH=/bin:\", \"PATH=/usr/lib/jvm/java-11-graal/bin:/bin:\") with open(\"/etc/init.d/jenkins\", \"w\") as file: file.write(jenkinsService) # Restart the service. runProcess([\"systemctl\",", "os.system(\"echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] https://pkg.jenkins.io/debian-stable binary/ | sudo tee /etc/apt/sources.list.d/jenkins.list > /dev/null\") runProcess([\"apt\", \"update\"])", "the file. jenkinsService = jenkinsService.replace(\"PATH=/bin:\", \"PATH=/usr/lib/jvm/java-11-graal/bin:/bin:\") with open(\"/etc/init.d/jenkins\", \"w\") as file: file.write(jenkinsService) #", "shutil.copytree(graalDownloadLocation, \"/usr/lib/jvm/java-11-graal\") # Add GraalVM to the Jenkins service file path and restart", "\"graalvm-11\") graalDownloadLocation = getDownloadLocation(\"graalvm-11\") graalDownloadLocation = os.path.join(graalDownloadLocation, os.listdir(graalDownloadLocation)[0]) shutil.copytree(graalDownloadLocation, \"/usr/lib/jvm/java-11-graal\") # Add GraalVM", "\"PATH=/usr/lib/jvm/java-11-graal/bin:/bin:\") with open(\"/etc/init.d/jenkins\", \"w\") as file: file.write(jenkinsService) # Restart the service. 
runProcess([\"systemctl\", \"daemon-reload\"])", "runProcess([\"apt\", \"install\", \"-y\", \"curl\"]) os.system(\"curl -fsSL https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo tee /usr/share/keyrings/jenkins-keyring.asc > /dev/null\")", "Step.Standard.RunProcess import runProcess def installJenkins(): # Install Jenkins. if not os.path.exists(\"/etc/init.d/jenkins\"): runProcess([\"apt\", \"install\",", "\"\"\" TheNexusAvenger Installs Jenkins and a separate Java version. \"\"\" import os import", "GraalVM to the Jenkins service file path and restart it. with open(\"/etc/init.d/jenkins\") as", "from Step.Standard.DownloadArchive import downloadArchive from Step.Standard.DownloadFile import getDownloadLocation from Step.Standard.RunProcess import runProcess def", "requires Java 11, while other programs require newer versions. if not os.path.exists(\"/usr/lib/jvm/java-11-graal\"): downloadArchive(\"https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.3.0/graalvm-ce-java11-linux-amd64-21.3.0.tar.gz\",", "> /dev/null\") runProcess([\"apt\", \"update\"]) try: runProcess([\"apt\", \"install\", \"-y\", \"jenkins\"]) except Exception: # Ignore", "graalDownloadLocation = getDownloadLocation(\"graalvm-11\") graalDownloadLocation = os.path.join(graalDownloadLocation, os.listdir(graalDownloadLocation)[0]) shutil.copytree(graalDownloadLocation, \"/usr/lib/jvm/java-11-graal\") # Add GraalVM to", "os import shutil from Step.Standard.DownloadArchive import downloadArchive from Step.Standard.DownloadFile import getDownloadLocation from Step.Standard.RunProcess", "file. 
jenkinsService = jenkinsService.replace(\"PATH=/bin:\", \"PATH=/usr/lib/jvm/java-11-graal/bin:/bin:\") with open(\"/etc/init.d/jenkins\", \"w\") as file: file.write(jenkinsService) # Restart", "[signed-by=/usr/share/keyrings/jenkins-keyring.asc] https://pkg.jenkins.io/debian-stable binary/ | sudo tee /etc/apt/sources.list.d/jenkins.list > /dev/null\") runProcess([\"apt\", \"update\"]) try: runProcess([\"apt\",", "not os.path.exists(\"/etc/init.d/jenkins\"): runProcess([\"apt\", \"install\", \"-y\", \"curl\"]) os.system(\"curl -fsSL https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo tee /usr/share/keyrings/jenkins-keyring.asc", "Java 11, while other programs require newer versions. if not os.path.exists(\"/usr/lib/jvm/java-11-graal\"): downloadArchive(\"https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.3.0/graalvm-ce-java11-linux-amd64-21.3.0.tar.gz\", \"tar.gz\",", "with open(\"/etc/init.d/jenkins\") as file: jenkinsService = file.read() if \"/usr/lib/jvm/java-11-graal\" not in jenkinsService: #", "/etc/apt/sources.list.d/jenkins.list > /dev/null\") runProcess([\"apt\", \"update\"]) try: runProcess([\"apt\", \"install\", \"-y\", \"jenkins\"]) except Exception: #", "\"install\", \"-y\", \"curl\"]) os.system(\"curl -fsSL https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo tee /usr/share/keyrings/jenkins-keyring.asc > /dev/null\") os.system(\"echo", "# Add GraalVM to the Jenkins service file path and restart it. with", "require newer versions. if not os.path.exists(\"/usr/lib/jvm/java-11-graal\"): downloadArchive(\"https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.3.0/graalvm-ce-java11-linux-amd64-21.3.0.tar.gz\", \"tar.gz\", \"graalvm-11\") graalDownloadLocation = getDownloadLocation(\"graalvm-11\") graalDownloadLocation", "programs require newer versions. 
if not os.path.exists(\"/usr/lib/jvm/java-11-graal\"): downloadArchive(\"https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.3.0/graalvm-ce-java11-linux-amd64-21.3.0.tar.gz\", \"tar.gz\", \"graalvm-11\") graalDownloadLocation = getDownloadLocation(\"graalvm-11\")", "version being used. pass # Set up GraalVM. # Jenkins requires Java 11,", "pass # Set up GraalVM. # Jenkins requires Java 11, while other programs", "runProcess([\"apt\", \"update\"]) try: runProcess([\"apt\", \"install\", \"-y\", \"jenkins\"]) except Exception: # Ignore exceptions that", "import downloadArchive from Step.Standard.DownloadFile import getDownloadLocation from Step.Standard.RunProcess import runProcess def installJenkins(): #", "and a separate Java version. \"\"\" import os import shutil from Step.Standard.DownloadArchive import", "= jenkinsService.replace(\"PATH=/bin:\", \"PATH=/usr/lib/jvm/java-11-graal/bin:/bin:\") with open(\"/etc/init.d/jenkins\", \"w\") as file: file.write(jenkinsService) # Restart the service.", "the wrong java version being used. pass # Set up GraalVM. # Jenkins", "11, while other programs require newer versions. if not os.path.exists(\"/usr/lib/jvm/java-11-graal\"): downloadArchive(\"https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.3.0/graalvm-ce-java11-linux-amd64-21.3.0.tar.gz\", \"tar.gz\", \"graalvm-11\")", "/usr/share/keyrings/jenkins-keyring.asc > /dev/null\") os.system(\"echo deb [signed-by=/usr/share/keyrings/jenkins-keyring.asc] https://pkg.jenkins.io/debian-stable binary/ | sudo tee /etc/apt/sources.list.d/jenkins.list >", "runProcess([\"apt\", \"install\", \"-y\", \"jenkins\"]) except Exception: # Ignore exceptions that are likely due", "Set up GraalVM. # Jenkins requires Java 11, while other programs require newer", "GraalVM. # Jenkins requires Java 11, while other programs require newer versions. if", "TheNexusAvenger Installs Jenkins and a separate Java version. 
\"\"\" import os import shutil", "= getDownloadLocation(\"graalvm-11\") graalDownloadLocation = os.path.join(graalDownloadLocation, os.listdir(graalDownloadLocation)[0]) shutil.copytree(graalDownloadLocation, \"/usr/lib/jvm/java-11-graal\") # Add GraalVM to the", "\"/usr/lib/jvm/java-11-graal\" not in jenkinsService: # Modify the file. jenkinsService = jenkinsService.replace(\"PATH=/bin:\", \"PATH=/usr/lib/jvm/java-11-graal/bin:/bin:\") with", "# Modify the file. jenkinsService = jenkinsService.replace(\"PATH=/bin:\", \"PATH=/usr/lib/jvm/java-11-graal/bin:/bin:\") with open(\"/etc/init.d/jenkins\", \"w\") as file:", "not os.path.exists(\"/usr/lib/jvm/java-11-graal\"): downloadArchive(\"https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.3.0/graalvm-ce-java11-linux-amd64-21.3.0.tar.gz\", \"tar.gz\", \"graalvm-11\") graalDownloadLocation = getDownloadLocation(\"graalvm-11\") graalDownloadLocation = os.path.join(graalDownloadLocation, os.listdir(graalDownloadLocation)[0]) shutil.copytree(graalDownloadLocation,", "= os.path.join(graalDownloadLocation, os.listdir(graalDownloadLocation)[0]) shutil.copytree(graalDownloadLocation, \"/usr/lib/jvm/java-11-graal\") # Add GraalVM to the Jenkins service file", "java version being used. pass # Set up GraalVM. # Jenkins requires Java", "to the Jenkins service file path and restart it. with open(\"/etc/init.d/jenkins\") as file:", "Install Jenkins. if not os.path.exists(\"/etc/init.d/jenkins\"): runProcess([\"apt\", \"install\", \"-y\", \"curl\"]) os.system(\"curl -fsSL https://pkg.jenkins.io/debian-stable/jenkins.io.key |", "versions. 
if not os.path.exists(\"/usr/lib/jvm/java-11-graal\"): downloadArchive(\"https://github.com/graalvm/graalvm-ce-builds/releases/download/vm-21.3.0/graalvm-ce-java11-linux-amd64-21.3.0.tar.gz\", \"tar.gz\", \"graalvm-11\") graalDownloadLocation = getDownloadLocation(\"graalvm-11\") graalDownloadLocation = os.path.join(graalDownloadLocation,", "\"update\"]) try: runProcess([\"apt\", \"install\", \"-y\", \"jenkins\"]) except Exception: # Ignore exceptions that are", "\"/usr/lib/jvm/java-11-graal\") # Add GraalVM to the Jenkins service file path and restart it.", "<reponame>TheNexusAvenger/Kubuntu-Helper-Scripts \"\"\" TheNexusAvenger Installs Jenkins and a separate Java version. \"\"\" import os", "the Jenkins service file path and restart it. with open(\"/etc/init.d/jenkins\") as file: jenkinsService", "Ignore exceptions that are likely due to the wrong java version being used.", "import getDownloadLocation from Step.Standard.RunProcess import runProcess def installJenkins(): # Install Jenkins. if not", "sudo tee /etc/apt/sources.list.d/jenkins.list > /dev/null\") runProcess([\"apt\", \"update\"]) try: runProcess([\"apt\", \"install\", \"-y\", \"jenkins\"]) except", "from Step.Standard.RunProcess import runProcess def installJenkins(): # Install Jenkins. if not os.path.exists(\"/etc/init.d/jenkins\"): runProcess([\"apt\"," ]
[ "\"nose_bridge\", \"nose_lower\", \"left_eye\", \"right_eye\", \"mouth_outer\", \"mouth_inner\"] pos = [ # jaw [219., 287.],", "16], left_brow=[17, 18, 19, 20, 21], right_brow=[22, 23, 24, 25, 26], nose_bridge=[27, 28,", "[419., 484.], [398., 488.], [379., 486.], [353., 475.], # mouth_inner [340., 451.], [378.,", "left_brow [254., 252.], [274., 233.], [304., 227.], [335., 232.], [363., 244.], # right_brow", "270.], [482., 269.], [501., 279.7], [484., 287.], [462., 288.], # mouth_outer [328., 448.],", "32, 33, 34, 35], left_eye=[36, 37, 38, 39, 40, 41], right_eye=[42, 43, 44,", "44, 45, 46, 47], mouth_outer=[48, 49, 50, 51, 52, 53, 54, 55, 56,", "nose_bridge=[27, 28, 29, 30], nose_lower=[31, 32, 33, 34, 35], left_eye=[36, 37, 38, 39,", "[317., 542.], [357., 568.], [400., 576.], [444., 566.], [482., 540.], [518., 507.], [546.,", "64, 65, 66, 67], left_pupil=[68], right_pupil=[69]) keys = [\"jaw\", \"left_brow\", \"right_brow\", \"nose_bridge\", \"nose_lower\",", "[501., 279.7], [484., 287.], [462., 288.], # mouth_outer [328., 448.], [353., 437.], [378.,", "[379., 486.], [353., 475.], # mouth_inner [340., 451.], [378., 448.], [397., 450.], [415.,", "51, 52, 53, 54, 55, 56, 57, 58, 59], mouth_inner=[60, 61, 62, 63,", "57, 58, 59], mouth_inner=[60, 61, 62, 63, 64, 65, 66, 67], left_pupil=[68], right_pupil=[69])", "428.], [254., 471.], [284., 511.], [317., 542.], [357., 568.], [400., 576.], [444., 566.],", "269.], [501., 279.7], [484., 287.], [462., 288.], # mouth_outer [328., 448.], [353., 437.],", "[398., 488.], [379., 486.], [353., 475.], # mouth_inner [340., 451.], [378., 448.], [397.,", "66, 67], left_pupil=[68], right_pupil=[69]) keys = [\"jaw\", \"left_brow\", \"right_brow\", \"nose_bridge\", \"nose_lower\", \"left_eye\", \"right_eye\",", "37, 38, 39, 40, 41], right_eye=[42, 43, 44, 45, 46, 47], mouth_outer=[48, 49,", "40, 41], right_eye=[42, 43, 44, 45, 46, 47], mouth_outer=[48, 49, 50, 51, 52,", "\"\"\"construct landmark models \"\"\" import json def 
read_json(fname): with open(fname) as fid: data", "nose_lower=[31, 32, 33, 34, 35], left_eye=[36, 37, 38, 39, 40, 41], right_eye=[42, 43,", "[546., 467.], [564., 424.], [571., 376.], [576., 329.], [576., 281.], # left_brow [254.,", "29, 30], nose_lower=[31, 32, 33, 34, 35], left_eye=[36, 37, 38, 39, 40, 41],", "63, 64, 65, 66, 67], left_pupil=[68], right_pupil=[69]) keys = [\"jaw\", \"left_brow\", \"right_brow\", \"nose_bridge\",", "[482., 223.], [512., 228.], [535., 245.], # nose_bridge [394., 277.], [394., 309.], [394.,", "334.], [227., 381.], [237., 428.], [254., 471.], [284., 511.], [317., 542.], [357., 568.],", "18, 19, 20, 21], right_brow=[22, 23, 24, 25, 26], nose_bridge=[27, 28, 29, 30],", "13, 14, 15, 16], left_brow=[17, 18, 19, 20, 21], right_brow=[22, 23, 24, 25,", "[450., 229.], [482., 223.], [512., 228.], [535., 245.], # nose_bridge [394., 277.], [394.,", "[417., 462.], [397., 463.], [377., 460.], # left_pupil [319., 278.], # right_pupil [474.,", "[396., 438.], [416., 432.], [442., 437.], [468., 446.], [444., 472.], [419., 484.], [398.,", "399.], [396., 404.], [414., 398.], [430., 391.], # left_eye [288., 283.], [307., 271.],", "434.], [396., 438.], [416., 432.], [442., 437.], [468., 446.], [444., 472.], [419., 484.],", "438.], [416., 432.], [442., 437.], [468., 446.], [444., 472.], [419., 484.], [398., 488.],", "507.], [546., 467.], [564., 424.], [571., 376.], [576., 329.], [576., 281.], # left_brow", "53, 54, 55, 56, 57, 58, 59], mouth_inner=[60, 61, 62, 63, 64, 65,", "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,", "8, 9, 10, 11, 12, 13, 14, 15, 16], left_brow=[17, 18, 19, 20,", "281.], # left_brow [254., 252.], [274., 233.], [304., 227.], [335., 232.], [363., 244.],", "371.], [360., 392.], # nose_lower [377., 399.], [396., 404.], [414., 398.], [430., 391.],", "[288., 283.], [307., 271.], [330., 271.], [348., 285.], [329., 290.], [306., 290.], #", "left_pupil [319., 278.], # right_pupil [474., 277.] 
] model = dict(pos=pos, index=index, keys=keys)", "[340., 451.], [378., 448.], [397., 450.], [415., 448.], [457., 449.], [417., 462.], [397.,", "[378., 448.], [397., 450.], [415., 448.], [457., 449.], [417., 462.], [397., 463.], [377.,", "[512., 228.], [535., 245.], # nose_bridge [394., 277.], [394., 309.], [394., 341.], [395.,", "[335., 232.], [363., 244.], # right_brow [422., 241.], [450., 229.], [482., 223.], [512.,", "283.], [307., 271.], [330., 271.], [348., 285.], [329., 290.], [306., 290.], # right_eye", "511.], [317., 542.], [357., 568.], [400., 576.], [444., 566.], [482., 540.], [518., 507.],", "right_pupil [474., 277.] ] model = dict(pos=pos, index=index, keys=keys) if __name__ == \"__main__\":", "left_brow=[17, 18, 19, 20, 21], right_brow=[22, 23, 24, 25, 26], nose_bridge=[27, 28, 29,", "283.], [459., 270.], [482., 269.], [501., 279.7], [484., 287.], [462., 288.], # mouth_outer", "38, 39, 40, 41], right_eye=[42, 43, 44, 45, 46, 47], mouth_outer=[48, 49, 50,", "[400., 576.], [444., 566.], [482., 540.], [518., 507.], [546., 467.], [564., 424.], [571.,", "\"left_brow\", \"right_brow\", \"nose_bridge\", \"nose_lower\", \"left_eye\", \"right_eye\", \"mouth_outer\", \"mouth_inner\"] pos = [ # jaw", "376.], [576., 329.], [576., 281.], # left_brow [254., 252.], [274., 233.], [304., 227.],", "\"\"\" import json def read_json(fname): with open(fname) as fid: data = json.load(fid) return", "[304., 227.], [335., 232.], [363., 244.], # right_brow [422., 241.], [450., 229.], [482.,", "9, 10, 11, 12, 13, 14, 15, 16], left_brow=[17, 18, 19, 20, 21],", "233.], [304., 227.], [335., 232.], [363., 244.], # right_brow [422., 241.], [450., 229.],", "227.], [335., 232.], [363., 244.], # right_brow [422., 241.], [450., 229.], [482., 223.],", "[394., 341.], [395., 371.], [360., 392.], # nose_lower [377., 399.], [396., 404.], [414.,", "448.], [397., 450.], [415., 448.], [457., 449.], [417., 462.], [397., 463.], [377., 460.],", "244.], # right_brow [422., 241.], [450., 229.], 
[482., 223.], [512., 228.], [535., 245.],", "left_eye [288., 283.], [307., 271.], [330., 271.], [348., 285.], [329., 290.], [306., 290.],", "[484., 287.], [462., 288.], # mouth_outer [328., 448.], [353., 437.], [378., 434.], [396.,", "67], left_pupil=[68], right_pupil=[69]) keys = [\"jaw\", \"left_brow\", \"right_brow\", \"nose_bridge\", \"nose_lower\", \"left_eye\", \"right_eye\", \"mouth_outer\",", "[328., 448.], [353., 437.], [378., 434.], [396., 438.], [416., 432.], [442., 437.], [468.,", "[ # jaw [219., 287.], [220., 334.], [227., 381.], [237., 428.], [254., 471.],", "55, 56, 57, 58, 59], mouth_inner=[60, 61, 62, 63, 64, 65, 66, 67],", "[422., 241.], [450., 229.], [482., 223.], [512., 228.], [535., 245.], # nose_bridge [394.,", "[442., 283.], [459., 270.], [482., 269.], [501., 279.7], [484., 287.], [462., 288.], #", "540.], [518., 507.], [546., 467.], [564., 424.], [571., 376.], [576., 329.], [576., 281.],", "285.], [329., 290.], [306., 290.], # right_eye [442., 283.], [459., 270.], [482., 269.],", "# nose_lower [377., 399.], [396., 404.], [414., 398.], [430., 391.], # left_eye [288.,", "mouth_inner [340., 451.], [378., 448.], [397., 450.], [415., 448.], [457., 449.], [417., 462.],", "# right_brow [422., 241.], [450., 229.], [482., 223.], [512., 228.], [535., 245.], #", "fid: data = json.load(fid) return data def write_json(model, fname): with open(fname, \"w\") as", "30], nose_lower=[31, 32, 33, 34, 35], left_eye=[36, 37, 38, 39, 40, 41], right_eye=[42,", "[353., 437.], [378., 434.], [396., 438.], [416., 432.], [442., 437.], [468., 446.], [444.,", "35], left_eye=[36, 37, 38, 39, 40, 41], right_eye=[42, 43, 44, 45, 46, 47],", "fid) index = dict( jaw=[0, 1, 2, 3, 4, 5, 6, 7, 8,", "[576., 281.], # left_brow [254., 252.], [274., 233.], [304., 227.], [335., 232.], [363.,", "576.], [444., 566.], [482., 540.], [518., 507.], [546., 467.], [564., 424.], [571., 376.],", "[363., 244.], # right_brow [422., 241.], [450., 229.], [482., 223.], [512., 228.], [535.,", "[360., 
392.], # nose_lower [377., 399.], [396., 404.], [414., 398.], [430., 391.], #", "\"mouth_inner\"] pos = [ # jaw [219., 287.], [220., 334.], [227., 381.], [237.,", "\"left_eye\", \"right_eye\", \"mouth_outer\", \"mouth_inner\"] pos = [ # jaw [219., 287.], [220., 334.],", "437.], [468., 446.], [444., 472.], [419., 484.], [398., 488.], [379., 486.], [353., 475.],", "41], right_eye=[42, 43, 44, 45, 46, 47], mouth_outer=[48, 49, 50, 51, 52, 53,", "65, 66, 67], left_pupil=[68], right_pupil=[69]) keys = [\"jaw\", \"left_brow\", \"right_brow\", \"nose_bridge\", \"nose_lower\", \"left_eye\",", "[394., 277.], [394., 309.], [394., 341.], [395., 371.], [360., 392.], # nose_lower [377.,", "json.load(fid) return data def write_json(model, fname): with open(fname, \"w\") as fid: json.dump(model, fid)", "58, 59], mouth_inner=[60, 61, 62, 63, 64, 65, 66, 67], left_pupil=[68], right_pupil=[69]) keys", "437.], [378., 434.], [396., 438.], [416., 432.], [442., 437.], [468., 446.], [444., 472.],", "[284., 511.], [317., 542.], [357., 568.], [400., 576.], [444., 566.], [482., 540.], [518.,", "[357., 568.], [400., 576.], [444., 566.], [482., 540.], [518., 507.], [546., 467.], [564.,", "62, 63, 64, 65, 66, 67], left_pupil=[68], right_pupil=[69]) keys = [\"jaw\", \"left_brow\", \"right_brow\",", "52, 53, 54, 55, 56, 57, 58, 59], mouth_inner=[60, 61, 62, 63, 64,", "[457., 449.], [417., 462.], [397., 463.], [377., 460.], # left_pupil [319., 278.], #", "models \"\"\" import json def read_json(fname): with open(fname) as fid: data = json.load(fid)", "[430., 391.], # left_eye [288., 283.], [307., 271.], [330., 271.], [348., 285.], [329.,", "471.], [284., 511.], [317., 542.], [357., 568.], [400., 576.], [444., 566.], [482., 540.],", "432.], [442., 437.], [468., 446.], [444., 472.], [419., 484.], [398., 488.], [379., 486.],", "475.], # mouth_inner [340., 451.], [378., 448.], [397., 450.], [415., 448.], [457., 449.],", "[397., 450.], [415., 448.], [457., 449.], [417., 462.], [397., 463.], [377., 
460.], #", "448.], [457., 449.], [417., 462.], [397., 463.], [377., 460.], # left_pupil [319., 278.],", "[444., 472.], [419., 484.], [398., 488.], [379., 486.], [353., 475.], # mouth_inner [340.,", "# nose_bridge [394., 277.], [394., 309.], [394., 341.], [395., 371.], [360., 392.], #", "keys = [\"jaw\", \"left_brow\", \"right_brow\", \"nose_bridge\", \"nose_lower\", \"left_eye\", \"right_eye\", \"mouth_outer\", \"mouth_inner\"] pos =", "[414., 398.], [430., 391.], # left_eye [288., 283.], [307., 271.], [330., 271.], [348.,", "245.], # nose_bridge [394., 277.], [394., 309.], [394., 341.], [395., 371.], [360., 392.],", "392.], # nose_lower [377., 399.], [396., 404.], [414., 398.], [430., 391.], # left_eye", "460.], # left_pupil [319., 278.], # right_pupil [474., 277.] ] model = dict(pos=pos,", "43, 44, 45, 46, 47], mouth_outer=[48, 49, 50, 51, 52, 53, 54, 55,", "381.], [237., 428.], [254., 471.], [284., 511.], [317., 542.], [357., 568.], [400., 576.],", "[274., 233.], [304., 227.], [335., 232.], [363., 244.], # right_brow [422., 241.], [450.,", "34, 35], left_eye=[36, 37, 38, 39, 40, 41], right_eye=[42, 43, 44, 45, 46,", "24, 25, 26], nose_bridge=[27, 28, 29, 30], nose_lower=[31, 32, 33, 34, 35], left_eye=[36,", "return data def write_json(model, fname): with open(fname, \"w\") as fid: json.dump(model, fid) index", "[518., 507.], [546., 467.], [564., 424.], [571., 376.], [576., 329.], [576., 281.], #", "25, 26], nose_bridge=[27, 28, 29, 30], nose_lower=[31, 32, 33, 34, 35], left_eye=[36, 37,", "449.], [417., 462.], [397., 463.], [377., 460.], # left_pupil [319., 278.], # right_pupil", "\"right_eye\", \"mouth_outer\", \"mouth_inner\"] pos = [ # jaw [219., 287.], [220., 334.], [227.,", "20, 21], right_brow=[22, 23, 24, 25, 26], nose_bridge=[27, 28, 29, 30], nose_lower=[31, 32,", "288.], # mouth_outer [328., 448.], [353., 437.], [378., 434.], [396., 438.], [416., 432.],", "= json.load(fid) return data def write_json(model, fname): with open(fname, \"w\") as fid: 
json.dump(model,", "[397., 463.], [377., 460.], # left_pupil [319., 278.], # right_pupil [474., 277.] ]", "252.], [274., 233.], [304., 227.], [335., 232.], [363., 244.], # right_brow [422., 241.],", "[394., 309.], [394., 341.], [395., 371.], [360., 392.], # nose_lower [377., 399.], [396.,", "271.], [348., 285.], [329., 290.], [306., 290.], # right_eye [442., 283.], [459., 270.],", "json.dump(model, fid) index = dict( jaw=[0, 1, 2, 3, 4, 5, 6, 7,", "[219., 287.], [220., 334.], [227., 381.], [237., 428.], [254., 471.], [284., 511.], [317.,", "19, 20, 21], right_brow=[22, 23, 24, 25, 26], nose_bridge=[27, 28, 29, 30], nose_lower=[31,", "[377., 399.], [396., 404.], [414., 398.], [430., 391.], # left_eye [288., 283.], [307.,", "278.], # right_pupil [474., 277.] ] model = dict(pos=pos, index=index, keys=keys) if __name__", "[306., 290.], # right_eye [442., 283.], [459., 270.], [482., 269.], [501., 279.7], [484.,", "566.], [482., 540.], [518., 507.], [546., 467.], [564., 424.], [571., 376.], [576., 329.],", "open(fname, \"w\") as fid: json.dump(model, fid) index = dict( jaw=[0, 1, 2, 3,", "271.], [330., 271.], [348., 285.], [329., 290.], [306., 290.], # right_eye [442., 283.],", "[227., 381.], [237., 428.], [254., 471.], [284., 511.], [317., 542.], [357., 568.], [400.,", "as fid: data = json.load(fid) return data def write_json(model, fname): with open(fname, \"w\")", "462.], [397., 463.], [377., 460.], # left_pupil [319., 278.], # right_pupil [474., 277.]", "232.], [363., 244.], # right_brow [422., 241.], [450., 229.], [482., 223.], [512., 228.],", "26], nose_bridge=[27, 28, 29, 30], nose_lower=[31, 32, 33, 34, 35], left_eye=[36, 37, 38,", "= dict( jaw=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,", "341.], [395., 371.], [360., 392.], # nose_lower [377., 399.], [396., 404.], [414., 398.],", "data = json.load(fid) return data def write_json(model, fname): with open(fname, \"w\") as fid:", "def write_json(model, fname): with open(fname, \"w\") as fid: json.dump(model, fid) index = 
dict(", "28, 29, 30], nose_lower=[31, 32, 33, 34, 35], left_eye=[36, 37, 38, 39, 40,", "56, 57, 58, 59], mouth_inner=[60, 61, 62, 63, 64, 65, 66, 67], left_pupil=[68],", "287.], [220., 334.], [227., 381.], [237., 428.], [254., 471.], [284., 511.], [317., 542.],", "[254., 252.], [274., 233.], [304., 227.], [335., 232.], [363., 244.], # right_brow [422.,", "14, 15, 16], left_brow=[17, 18, 19, 20, 21], right_brow=[22, 23, 24, 25, 26],", "[442., 437.], [468., 446.], [444., 472.], [419., 484.], [398., 488.], [379., 486.], [353.,", "47], mouth_outer=[48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59],", "\"w\") as fid: json.dump(model, fid) index = dict( jaw=[0, 1, 2, 3, 4,", "# mouth_outer [328., 448.], [353., 437.], [378., 434.], [396., 438.], [416., 432.], [442.,", "[237., 428.], [254., 471.], [284., 511.], [317., 542.], [357., 568.], [400., 576.], [444.,", "mouth_outer=[48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59], mouth_inner=[60,", "# left_pupil [319., 278.], # right_pupil [474., 277.] ] model = dict(pos=pos, index=index,", "467.], [564., 424.], [571., 376.], [576., 329.], [576., 281.], # left_brow [254., 252.],", "write_json(model, fname): with open(fname, \"w\") as fid: json.dump(model, fid) index = dict( jaw=[0,", "39, 40, 41], right_eye=[42, 43, 44, 45, 46, 47], mouth_outer=[48, 49, 50, 51,", "right_eye=[42, 43, 44, 45, 46, 47], mouth_outer=[48, 49, 50, 51, 52, 53, 54,", "[468., 446.], [444., 472.], [419., 484.], [398., 488.], [379., 486.], [353., 475.], #", "568.], [400., 576.], [444., 566.], [482., 540.], [518., 507.], [546., 467.], [564., 424.],", "with open(fname) as fid: data = json.load(fid) return data def write_json(model, fname): with", "as fid: json.dump(model, fid) index = dict( jaw=[0, 1, 2, 3, 4, 5,", "542.], [357., 568.], [400., 576.], [444., 566.], [482., 540.], [518., 507.], [546., 467.],", "[444., 566.], [482., 540.], [518., 507.], [546., 467.], [564., 424.], [571., 376.], [576.,", "[462., 288.], # mouth_outer [328., 448.], [353., 437.], [378., 
434.], [396., 438.], [416.,", "[415., 448.], [457., 449.], [417., 462.], [397., 463.], [377., 460.], # left_pupil [319.,", "mouth_inner=[60, 61, 62, 63, 64, 65, 66, 67], left_pupil=[68], right_pupil=[69]) keys = [\"jaw\",", "\"mouth_outer\", \"mouth_inner\"] pos = [ # jaw [219., 287.], [220., 334.], [227., 381.],", "nose_lower [377., 399.], [396., 404.], [414., 398.], [430., 391.], # left_eye [288., 283.],", "241.], [450., 229.], [482., 223.], [512., 228.], [535., 245.], # nose_bridge [394., 277.],", "391.], # left_eye [288., 283.], [307., 271.], [330., 271.], [348., 285.], [329., 290.],", "[353., 475.], # mouth_inner [340., 451.], [378., 448.], [397., 450.], [415., 448.], [457.,", "open(fname) as fid: data = json.load(fid) return data def write_json(model, fname): with open(fname,", "49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59], mouth_inner=[60, 61,", "446.], [444., 472.], [419., 484.], [398., 488.], [379., 486.], [353., 475.], # mouth_inner", "424.], [571., 376.], [576., 329.], [576., 281.], # left_brow [254., 252.], [274., 233.],", "\"nose_lower\", \"left_eye\", \"right_eye\", \"mouth_outer\", \"mouth_inner\"] pos = [ # jaw [219., 287.], [220.,", "54, 55, 56, 57, 58, 59], mouth_inner=[60, 61, 62, 63, 64, 65, 66,", "[535., 245.], # nose_bridge [394., 277.], [394., 309.], [394., 341.], [395., 371.], [360.,", "472.], [419., 484.], [398., 488.], [379., 486.], [353., 475.], # mouth_inner [340., 451.],", "# left_brow [254., 252.], [274., 233.], [304., 227.], [335., 232.], [363., 244.], #", "def read_json(fname): with open(fname) as fid: data = json.load(fid) return data def write_json(model,", "488.], [379., 486.], [353., 475.], # mouth_inner [340., 451.], [378., 448.], [397., 450.],", "data def write_json(model, fname): with open(fname, \"w\") as fid: json.dump(model, fid) index =", "59], mouth_inner=[60, 61, 62, 63, 64, 65, 66, 67], left_pupil=[68], right_pupil=[69]) keys =", "fname): with open(fname, \"w\") as fid: json.dump(model, fid) index = dict( jaw=[0, 1,", 
"23, 24, 25, 26], nose_bridge=[27, 28, 29, 30], nose_lower=[31, 32, 33, 34, 35],", "fid: json.dump(model, fid) index = dict( jaw=[0, 1, 2, 3, 4, 5, 6,", "[395., 371.], [360., 392.], # nose_lower [377., 399.], [396., 404.], [414., 398.], [430.,", "[377., 460.], # left_pupil [319., 278.], # right_pupil [474., 277.] ] model =", "279.7], [484., 287.], [462., 288.], # mouth_outer [328., 448.], [353., 437.], [378., 434.],", "pos = [ # jaw [219., 287.], [220., 334.], [227., 381.], [237., 428.],", "21], right_brow=[22, 23, 24, 25, 26], nose_bridge=[27, 28, 29, 30], nose_lower=[31, 32, 33,", "[348., 285.], [329., 290.], [306., 290.], # right_eye [442., 283.], [459., 270.], [482.,", "450.], [415., 448.], [457., 449.], [417., 462.], [397., 463.], [377., 460.], # left_pupil", "3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,", "290.], [306., 290.], # right_eye [442., 283.], [459., 270.], [482., 269.], [501., 279.7],", "277.] ] model = dict(pos=pos, index=index, keys=keys) if __name__ == \"__main__\": write_json(model, \"model.json\")", "right_brow [422., 241.], [450., 229.], [482., 223.], [512., 228.], [535., 245.], # nose_bridge", "287.], [462., 288.], # mouth_outer [328., 448.], [353., 437.], [378., 434.], [396., 438.],", "right_eye [442., 283.], [459., 270.], [482., 269.], [501., 279.7], [484., 287.], [462., 288.],", "4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],", "[220., 334.], [227., 381.], [237., 428.], [254., 471.], [284., 511.], [317., 542.], [357.,", "290.], # right_eye [442., 283.], [459., 270.], [482., 269.], [501., 279.7], [484., 287.],", "right_pupil=[69]) keys = [\"jaw\", \"left_brow\", \"right_brow\", \"nose_bridge\", \"nose_lower\", \"left_eye\", \"right_eye\", \"mouth_outer\", \"mouth_inner\"] pos", "398.], [430., 391.], # left_eye [288., 283.], [307., 271.], [330., 271.], [348., 285.],", "import json def read_json(fname): with open(fname) as fid: data = json.load(fid) return data", "jaw=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,", "= [\"jaw\", \"left_brow\", 
\"right_brow\", \"nose_bridge\", \"nose_lower\", \"left_eye\", \"right_eye\", \"mouth_outer\", \"mouth_inner\"] pos = [", "[482., 269.], [501., 279.7], [484., 287.], [462., 288.], # mouth_outer [328., 448.], [353.,", "nose_bridge [394., 277.], [394., 309.], [394., 341.], [395., 371.], [360., 392.], # nose_lower", "[482., 540.], [518., 507.], [546., 467.], [564., 424.], [571., 376.], [576., 329.], [576.,", "left_eye=[36, 37, 38, 39, 40, 41], right_eye=[42, 43, 44, 45, 46, 47], mouth_outer=[48,", "33, 34, 35], left_eye=[36, 37, 38, 39, 40, 41], right_eye=[42, 43, 44, 45,", "2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,", "# mouth_inner [340., 451.], [378., 448.], [397., 450.], [415., 448.], [457., 449.], [417.,", "landmark models \"\"\" import json def read_json(fname): with open(fname) as fid: data =", "jaw [219., 287.], [220., 334.], [227., 381.], [237., 428.], [254., 471.], [284., 511.],", "451.], [378., 448.], [397., 450.], [415., 448.], [457., 449.], [417., 462.], [397., 463.],", "463.], [377., 460.], # left_pupil [319., 278.], # right_pupil [474., 277.] 
] model", "# left_eye [288., 283.], [307., 271.], [330., 271.], [348., 285.], [329., 290.], [306.,", "223.], [512., 228.], [535., 245.], # nose_bridge [394., 277.], [394., 309.], [394., 341.],", "[330., 271.], [348., 285.], [329., 290.], [306., 290.], # right_eye [442., 283.], [459.,", "[254., 471.], [284., 511.], [317., 542.], [357., 568.], [400., 576.], [444., 566.], [482.,", "read_json(fname): with open(fname) as fid: data = json.load(fid) return data def write_json(model, fname):", "[564., 424.], [571., 376.], [576., 329.], [576., 281.], # left_brow [254., 252.], [274.,", "[396., 404.], [414., 398.], [430., 391.], # left_eye [288., 283.], [307., 271.], [330.,", "# right_eye [442., 283.], [459., 270.], [482., 269.], [501., 279.7], [484., 287.], [462.,", "[\"jaw\", \"left_brow\", \"right_brow\", \"nose_bridge\", \"nose_lower\", \"left_eye\", \"right_eye\", \"mouth_outer\", \"mouth_inner\"] pos = [ #", "dict( jaw=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,", "15, 16], left_brow=[17, 18, 19, 20, 21], right_brow=[22, 23, 24, 25, 26], nose_bridge=[27,", "277.], [394., 309.], [394., 341.], [395., 371.], [360., 392.], # nose_lower [377., 399.],", "[307., 271.], [330., 271.], [348., 285.], [329., 290.], [306., 290.], # right_eye [442.,", "[571., 376.], [576., 329.], [576., 281.], # left_brow [254., 252.], [274., 233.], [304.,", "50, 51, 52, 53, 54, 55, 56, 57, 58, 59], mouth_inner=[60, 61, 62,", "[378., 434.], [396., 438.], [416., 432.], [442., 437.], [468., 446.], [444., 472.], [419.,", "[474., 277.] 
] model = dict(pos=pos, index=index, keys=keys) if __name__ == \"__main__\": write_json(model,", "\"right_brow\", \"nose_bridge\", \"nose_lower\", \"left_eye\", \"right_eye\", \"mouth_outer\", \"mouth_inner\"] pos = [ # jaw [219.,", "[416., 432.], [442., 437.], [468., 446.], [444., 472.], [419., 484.], [398., 488.], [379.,", "45, 46, 47], mouth_outer=[48, 49, 50, 51, 52, 53, 54, 55, 56, 57,", "11, 12, 13, 14, 15, 16], left_brow=[17, 18, 19, 20, 21], right_brow=[22, 23,", "329.], [576., 281.], # left_brow [254., 252.], [274., 233.], [304., 227.], [335., 232.],", "mouth_outer [328., 448.], [353., 437.], [378., 434.], [396., 438.], [416., 432.], [442., 437.],", "61, 62, 63, 64, 65, 66, 67], left_pupil=[68], right_pupil=[69]) keys = [\"jaw\", \"left_brow\",", "with open(fname, \"w\") as fid: json.dump(model, fid) index = dict( jaw=[0, 1, 2,", "448.], [353., 437.], [378., 434.], [396., 438.], [416., 432.], [442., 437.], [468., 446.],", "[319., 278.], # right_pupil [474., 277.] ] model = dict(pos=pos, index=index, keys=keys) if", "[459., 270.], [482., 269.], [501., 279.7], [484., 287.], [462., 288.], # mouth_outer [328.,", "309.], [394., 341.], [395., 371.], [360., 392.], # nose_lower [377., 399.], [396., 404.],", "486.], [353., 475.], # mouth_inner [340., 451.], [378., 448.], [397., 450.], [415., 448.],", "404.], [414., 398.], [430., 391.], # left_eye [288., 283.], [307., 271.], [330., 271.],", "5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], left_brow=[17,", "index = dict( jaw=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,", "json def read_json(fname): with open(fname) as fid: data = json.load(fid) return data def", "right_brow=[22, 23, 24, 25, 26], nose_bridge=[27, 28, 29, 30], nose_lower=[31, 32, 33, 34,", "# jaw [219., 287.], [220., 334.], [227., 381.], [237., 428.], [254., 471.], [284.,", "46, 47], mouth_outer=[48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,", "left_pupil=[68], right_pupil=[69]) keys = [\"jaw\", \"left_brow\", \"right_brow\", \"nose_bridge\", \"nose_lower\", \"left_eye\", 
\"right_eye\", \"mouth_outer\", \"mouth_inner\"]", "[329., 290.], [306., 290.], # right_eye [442., 283.], [459., 270.], [482., 269.], [501.,", "484.], [398., 488.], [379., 486.], [353., 475.], # mouth_inner [340., 451.], [378., 448.],", "# right_pupil [474., 277.] ] model = dict(pos=pos, index=index, keys=keys) if __name__ ==", "6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], left_brow=[17, 18,", "229.], [482., 223.], [512., 228.], [535., 245.], # nose_bridge [394., 277.], [394., 309.],", "12, 13, 14, 15, 16], left_brow=[17, 18, 19, 20, 21], right_brow=[22, 23, 24,", "[576., 329.], [576., 281.], # left_brow [254., 252.], [274., 233.], [304., 227.], [335.,", "= [ # jaw [219., 287.], [220., 334.], [227., 381.], [237., 428.], [254.,", "228.], [535., 245.], # nose_bridge [394., 277.], [394., 309.], [394., 341.], [395., 371.],", "7, 8, 9, 10, 11, 12, 13, 14, 15, 16], left_brow=[17, 18, 19,", "10, 11, 12, 13, 14, 15, 16], left_brow=[17, 18, 19, 20, 21], right_brow=[22," ]
[ "= models.IntegerField( null=True, max_length=4, choices=YEAR_CHOICES, default=datetime.datetime.now().year ) def create_uuid(self): return shortuuid.uuid() def save(self,", "Calendar(models.Model): name = models.CharField(max_length=250) uuid = models.CharField(max_length=22) YEAR_CHOICES = [(r, r) for r", "get_image_small_url(self): # TODO: get these from the field height = 400 width =", "get_thumbnailer from filer.fields.image import FilerImageField import shortuuid class Calendar(models.Model): name = models.CharField(max_length=250) uuid", "width = 400 return get_thumbnailer(self.original_image.file).get_thumbnail({ 'size': (width, height), 'crop': True, 'upscale': True, 'detail':", "year = models.IntegerField( null=True, max_length=4, choices=YEAR_CHOICES, default=datetime.datetime.now().year ) def create_uuid(self): return shortuuid.uuid() def", "True, 'upscale': True, 'detail': True, 'subject_location': self.original_image.subject_location }).url def get_image_large_url(self): # TODO: get", "field height = 1200 width = 1200 return get_thumbnailer(self.original_image.file).get_thumbnail({ 'size': (width, height), 'crop':", "for i in range(1, x + 1)] day = models.IntegerField(choices=DAY_CHOICES(24)) image_source = models.URLField(blank=True)", "= 1200 width = 1200 return get_thumbnailer(self.original_image.file).get_thumbnail({ 'size': (width, height), 'crop': True, 'upscale':", "[(i, '_' + str(i) + '_') for i in range(1, x + 1)]", "= 1200 return get_thumbnailer(self.original_image.file).get_thumbnail({ 'size': (width, height), 'crop': True, 'upscale': True, 'detail': True,", "original_image = FilerImageField(null=True) def get_image_small_url(self): # TODO: get these from the field height", "create_uuid(self): return shortuuid.uuid() def save(self, *args, **kwargs): if not self.uuid: self.uuid = self.create_uuid()", "import shortuuid class Calendar(models.Model): name = models.CharField(max_length=250) uuid = models.CharField(max_length=22) YEAR_CHOICES = 
[(r,", "def existing_days(self): return self.days.all().count() def __str__(self): return self.name class Day(models.Model): class Meta: unique_together", "def __str__(self): return self.name class Day(models.Model): class Meta: unique_together = (('day', 'calendar')) ordering", "django.db import models import datetime from easy_thumbnails.files import get_thumbnailer from filer.fields.image import FilerImageField", "= lambda x: [(i, '_' + str(i) + '_') for i in range(1,", "Day(models.Model): class Meta: unique_together = (('day', 'calendar')) ordering = ['day', ] calendar =", "= models.CharField(max_length=250) uuid = models.CharField(max_length=22) YEAR_CHOICES = [(r, r) for r in range(1984,", "super(Calendar, self).save(*args, **kwargs) def existing_days(self): return self.days.all().count() def __str__(self): return self.name class Day(models.Model):", "return self.days.all().count() def __str__(self): return self.name class Day(models.Model): class Meta: unique_together = (('day',", "lambda x: [(i, '_' + str(i) + '_') for i in range(1, x", "i in range(1, x + 1)] day = models.IntegerField(choices=DAY_CHOICES(24)) image_source = models.URLField(blank=True) original_image", "def get_image_small_url(self): # TODO: get these from the field height = 400 width", "calendar = models.ForeignKey(Calendar, related_name=\"days\") DAY_CHOICES = lambda x: [(i, '_' + str(i) +", "datetime.date.today().year+1)] year = models.IntegerField( null=True, max_length=4, choices=YEAR_CHOICES, default=datetime.datetime.now().year ) def create_uuid(self): return shortuuid.uuid()", "= self.create_uuid() super(Calendar, self).save(*args, **kwargs) def existing_days(self): return self.days.all().count() def __str__(self): return self.name", "+ 1)] day = models.IntegerField(choices=DAY_CHOICES(24)) image_source = models.URLField(blank=True) original_image = FilerImageField(null=True) def get_image_small_url(self):", "datetime from easy_thumbnails.files import get_thumbnailer from 
from django.db import models
import datetime
from easy_thumbnails.files import get_thumbnailer
from filer.fields.image import FilerImageField
import shortuuid


def current_year():
    """Default for ``Calendar.year``.

    A callable, so the year is computed each time a row is created.
    The old ``default=datetime.datetime.now().year`` was evaluated once
    at import time and froze the year the server process started in.
    """
    return datetime.date.today().year


class Calendar(models.Model):
    """A yearly calendar: a named collection of Day entries."""

    name = models.CharField(max_length=250)
    # 22-character short UUID; generated lazily on first save (see save()).
    uuid = models.CharField(max_length=22)
    YEAR_CHOICES = [(r, r) for r in range(1984, datetime.date.today().year + 1)]
    # FIX: default is now a callable (evaluated per save, not at import
    # time) and the bogus ``max_length=4`` was dropped -- IntegerField
    # does not support max_length.
    year = models.IntegerField(
        null=True,
        choices=YEAR_CHOICES,
        default=current_year,
    )

    def create_uuid(self):
        """Return a fresh 22-character short UUID."""
        return shortuuid.uuid()

    def save(self, *args, **kwargs):
        """Assign a UUID on first save, then delegate to Model.save()."""
        if not self.uuid:
            self.uuid = self.create_uuid()
        super(Calendar, self).save(*args, **kwargs)

    def existing_days(self):
        """Number of Day rows already attached to this calendar."""
        return self.days.all().count()

    def __str__(self):
        return self.name


class Day(models.Model):
    """One day of a Calendar, backed by a filer image."""

    class Meta:
        unique_together = (('day', 'calendar'))
        ordering = ['day', ]

    # on_delete made explicit: CASCADE was the implicit default before
    # Django 2.0, where the argument became mandatory.
    calendar = models.ForeignKey(
        Calendar, related_name="days", on_delete=models.CASCADE)
    DAY_CHOICES = lambda x: [(i, '_' + str(i) + '_') for i in range(1, x + 1)]
    day = models.IntegerField(choices=DAY_CHOICES(24))
    image_source = models.URLField(blank=True)
    # NOTE(review): FilerImageField will also need an explicit on_delete
    # on Django >= 2.0 -- confirm the project's Django/filer versions.
    original_image = FilerImageField(null=True)

    def _thumbnail_url(self, edge):
        """Return the URL of a square ``edge`` x ``edge`` thumbnail of
        original_image (cropped, upscaled, detail pass, honouring the
        image's subject location)."""
        return get_thumbnailer(self.original_image.file).get_thumbnail({
            'size': (edge, edge),
            'crop': True,
            'upscale': True,
            'detail': True,
            'subject_location': self.original_image.subject_location,
        }).url

    def get_image_small_url(self):
        # TODO: get these from the field
        return self._thumbnail_url(400)

    def get_image_large_url(self):
        # TODO: get these from the field
        return self._thumbnail_url(1200)

    def __str__(self):
        return ' '.join([self.calendar.name, str(self.day)])
#!/usr/bin/python3

# Enumerated location constants, assigned their canonical ascending
# order 0..5.  (HAND/DECK/GRAVEYARD suggest card-game zones -- confirm
# against the callers.)
HAND, HERO, FIELD, DECK, GRAVEYARD, REMOVED = range(6)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File Name: Problem_7.py
# Project Name: WebLearn
# Author: <NAME>
# Created Time: 2019-01-13 02:04
# Version: 0.0.1.20190113
#
# Copyright (c) <NAME> 2019
# All rights reserved.
#


def total_feeding_time(feed_times, animal_count):
    """Sum the slowest feeder's time for each of the first animals.

    ``feed_times`` is a matrix of 8 rows (one per feeder); column ``i``
    holds the eight feeders' times for animal ``i``.  The original code
    found each column's maximum with a single bubble-sort pass that
    pushed the largest value down to row 7 and then read row 7;
    ``max()`` over the column is equivalent and clearer.

    :param feed_times: list of 8 equal-length lists of ints
    :param animal_count: number of leading columns (animals) to include
    :return: sum over those columns of the per-column maximum
    """
    return sum(max(row[i] for row in feed_times) for i in range(animal_count))


if __name__ == '__main__':
    # Each test case: a line with the animal count, then 8 rows of times.
    # Stop cleanly at end of input; the old bare ``except`` also hid
    # unrelated errors, so it is narrowed to the exceptions int()/input()
    # actually raise here.  split() (no argument) also tolerates runs of
    # whitespace, unlike the old split(' ').
    while True:
        try:
            animal_no = int(input())
            feed_time = [list(map(int, input().split())) for _ in range(8)]
            print(total_feeding_time(feed_time, animal_no))
        except (EOFError, ValueError):
            break
[]